id (int64) | original (string) | modified (string) |
---|---|---|
31,221 | def gh_get_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
ghid = argToList(args.get('id'))
includeheaders = args.get('includeheaders', "false").lower() == "true"
showalllinks = args.get('showalllinks', "false").lower() == "true"
result = client.search_events({"filters": [{"eventId": ghid}]})
if len(result.get("results", [])) > 0:
message = result.get("results", [None])[0]
envelope = {
"ID": message.get("eventId"),
"Received": message.get("timestamp"),
"Mailbox": message.get("origin"),
"Recipients": message.get("targets"),
"Subject": message.get("subject"),
"Display Name": message.get("displayName"),
"From Address": message.get("source"),
"From Domain": message.get("source").split("@")[-1],
"Reply-To": message.get("replyTo"),
"Return-Path": message.get("sourcePath"),
"IP Address": message.get("ip"),
}
envelope_md = tableToMarkdown("Message Details", envelope, envelope.keys())
authentication = {
"SPF": message.get("spf"),
"DKIM": message.get("dkim"),
"DMARC": message.get("dmarc"),
"Authentication Results": message.get("authenticationResults")
}
authentication_md = tableToMarkdown("Message Authentication", authentication, authentication.keys())
scores = {
"OWL": message.get("owlScore"),
"Sender Anomaly": message.get("anomalyScore"),
"Authenication Risk": message.get("authScore"),
"Name Spoofing": message.get("homographScore")
}
scores_md = tableToMarkdown("Risk Analysis Factors", scores, scores.keys())
links = []
for link in message.get("links"):
link_dict = {
"Text": link.get("text"),
"Url": link.get("url"),
"Tags": ", ".join(link.get("tags", []))
}
if showalllinks:
links.append(link_dict)
else:
if "suspicious" in link_dict['Tags'] or "malicious" in link_dict['Tags']:
links.append(link_dict)
# break
if showalllinks:
links_md = tableToMarkdown("Links", links, ["Text", "Url", "Tags"])
else:
links_md = tableToMarkdown("Suspicious/Malicious Links", links, ["Text", "Url", "Tags"])
files = []
for file in message.get("files"):
f = {
"Name": file.get("fileName"),
"Type": file.get("fileType"),
"SHA256": file.get("fileHash")
}
files.append(f)
files_md = tableToMarkdown("Files", files, ["Name", "Type", "SHA256"])
policies = []
if message.get("flag") is not None:
for policy_id in message.get("flag"):
policy = client.get_policy(policy_id).get("policy", {})
actions = []
for action in policy.get("actions"):
actions.append(action.get("type"))
p = {
"ID": policy.get("id"),
"Name": policy.get("name"),
"Actions": ",".join(actions)
}
policies.append(p)
policies_md = tableToMarkdown("Policies", policies, ["ID", "Name", "Actions"])
headers = []
msgheaders = message.get("headers")
for header in message.get("headers").keys():
h = {
"Name": header,
"Value": msgheaders[header]
}
headers.append(h)
if includeheaders:
headers_md = tableToMarkdown("Headers", headers, ["Name", "Value"])
else:
headers_md = ""
message_md = envelope_md + authentication_md + scores_md + links_md + files_md + policies_md + headers_md
return CommandResults(
readable_output=message_md,
outputs_prefix='GreatHorn.Message',
outputs_key_field='eventId',
outputs=result.get("results", [None])[0]
)
else:
return CommandResults(
readable_output="GreatHorn event not found",
outputs={}
)
| def gh_get_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
ghid = argToList(args.get('id'))
includeheaders = args.get('includeheaders', "false").lower() == "true"
showalllinks = args.get('showalllinks', "false").lower() == "true"
result = client.search_events({"filters": [{"eventId": ghid}]})
if len(result.get("results", [])) > 0:
message = result.get("results")[0]
envelope = {
"ID": message.get("eventId"),
"Received": message.get("timestamp"),
"Mailbox": message.get("origin"),
"Recipients": message.get("targets"),
"Subject": message.get("subject"),
"Display Name": message.get("displayName"),
"From Address": message.get("source"),
"From Domain": message.get("source").split("@")[-1],
"Reply-To": message.get("replyTo"),
"Return-Path": message.get("sourcePath"),
"IP Address": message.get("ip"),
}
envelope_md = tableToMarkdown("Message Details", envelope, envelope.keys())
authentication = {
"SPF": message.get("spf"),
"DKIM": message.get("dkim"),
"DMARC": message.get("dmarc"),
"Authentication Results": message.get("authenticationResults")
}
authentication_md = tableToMarkdown("Message Authentication", authentication, authentication.keys())
scores = {
"OWL": message.get("owlScore"),
"Sender Anomaly": message.get("anomalyScore"),
"Authenication Risk": message.get("authScore"),
"Name Spoofing": message.get("homographScore")
}
scores_md = tableToMarkdown("Risk Analysis Factors", scores, scores.keys())
links = []
for link in message.get("links"):
link_dict = {
"Text": link.get("text"),
"Url": link.get("url"),
"Tags": ", ".join(link.get("tags", []))
}
if showalllinks:
links.append(link_dict)
else:
if "suspicious" in link_dict['Tags'] or "malicious" in link_dict['Tags']:
links.append(link_dict)
# break
if showalllinks:
links_md = tableToMarkdown("Links", links, ["Text", "Url", "Tags"])
else:
links_md = tableToMarkdown("Suspicious/Malicious Links", links, ["Text", "Url", "Tags"])
files = []
for file in message.get("files"):
f = {
"Name": file.get("fileName"),
"Type": file.get("fileType"),
"SHA256": file.get("fileHash")
}
files.append(f)
files_md = tableToMarkdown("Files", files, ["Name", "Type", "SHA256"])
policies = []
if message.get("flag") is not None:
for policy_id in message.get("flag"):
policy = client.get_policy(policy_id).get("policy", {})
actions = []
for action in policy.get("actions"):
actions.append(action.get("type"))
p = {
"ID": policy.get("id"),
"Name": policy.get("name"),
"Actions": ",".join(actions)
}
policies.append(p)
policies_md = tableToMarkdown("Policies", policies, ["ID", "Name", "Actions"])
headers = []
msgheaders = message.get("headers")
for header in message.get("headers").keys():
h = {
"Name": header,
"Value": msgheaders[header]
}
headers.append(h)
if includeheaders:
headers_md = tableToMarkdown("Headers", headers, ["Name", "Value"])
else:
headers_md = ""
message_md = envelope_md + authentication_md + scores_md + links_md + files_md + policies_md + headers_md
return CommandResults(
readable_output=message_md,
outputs_prefix='GreatHorn.Message',
outputs_key_field='eventId',
outputs=result.get("results", [None])[0]
)
else:
return CommandResults(
readable_output="GreatHorn event not found",
outputs={}
)
|
31,457 | def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('0.0.0')]
for filename in sorted(os.listdir(release_notes_dir)):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
| def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('0.0.0')]
for filename in sorted(os.listdir(release_notes_dir)):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version]
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
|
45,816 | def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = dgm.utils.image_to_tensor(img).float() / 255.0
return tensor.view(1, *tensor.shape) # 1xCxHxW
| def load_image(file_name):
"""Loads the image with OpenCV and converts to torch.Tensor"""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
# load image with OpenCV
img = cv2.imread(file_name, cv2.IMREAD_COLOR)
# convert image to torch tensor
tensor = dgm.utils.image_to_tensor(img).float() / 255.0
return tensor.view(1, *tensor.shape) # 1xCxHxW
|
34,182 | def create_dir_for_file(file_path: Text) -> None:
"""Creates any missing parent directories of this files path."""
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
| def create_dir_for_file(file_path: Text) -> None:
"""Creates any missing parent directories of this file path."""
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
|
293 | def fit(
n=10000,
local_rv=None,
method="advi",
model=None,
random_seed=None,
start=None,
inf_kwargs=None,
**kwargs
):
r"""Handy shortcut for using inference methods in functional way
Parameters
----------
n : `int`
number of iterations
local_rv : dict[var->tuple]
mapping {model_variable -> approx params}
Local Vars are used for Autoencoding Variational Bayes
See (AEVB; Kingma and Welling, 2014) for details
method : str or :class:`Inference`
string name is case insensitive in:
- 'advi' for ADVI
- 'fullrank_advi' for FullRankADVI
- 'svgd' for Stein Variational Gradient Descent
- 'asvgd' for Amortized Stein Variational Gradient Descent
- 'nfvi' for Normalizing Flow with default `scale-loc` flow
- 'nfvi=<formula>' for Normalizing Flow using formula
model : :class:`Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
inf_kwargs : dict
additional kwargs passed to :class:`Inference`
start : `Point`
starting point for inference
Other Parameters
----------------
score : bool
evaluate loss on each iteration or not
callbacks : list[function : (Approximation, losses, i) -> None]
calls provided functions after each iteration step
progressbar : bool
whether to show progressbar or not
obj_n_mc : `int`
Number of monte carlo samples used for approximation of objective gradients
tf_n_mc : `int`
Number of monte carlo samples used for approximation of test function gradients
obj_optimizer : function (grads, params) -> updates
Optimizer that is used for objective params
test_optimizer : function (grads, params) -> updates
Optimizer that is used for test function params
more_obj_params : `list`
Add custom params for objective optimizer
more_tf_params : `list`
Add custom params for test function optimizer
more_updates : `dict`
Add custom updates to resulting updates
total_grad_norm_constraint : `float`
Bounds gradient norm, prevents exploding gradient problem
fn_kwargs : `dict`
Add kwargs to theano.function (e.g. `{'profile': True}`)
more_replacements : `dict`
Apply custom replacements before calculating gradients
Returns
-------
:class:`Approximation`
"""
if inf_kwargs is None:
inf_kwargs = dict()
else:
inf_kwargs = inf_kwargs.copy()
if local_rv is not None:
inf_kwargs["local_rv"] = local_rv
if random_seed is not None:
inf_kwargs["random_seed"] = random_seed
if start is not None:
inf_kwargs["start"] = start
if model is None:
model = pm.modelcontext(model)
_select = dict(advi=ADVI, fullrank_advi=FullRankADVI, svgd=SVGD, asvgd=ASVGD, nfvi=NFVI)
if isinstance(method, str):
method = method.lower()
if method.startswith("nfvi="):
formula = method[5:]
inference = NFVI(formula, **inf_kwargs)
elif method in _select:
inference = _select[method](model=model, **inf_kwargs)
else:
raise KeyError(
"method should be one of %s " "or Inference instance" % set(_select.keys())
)
elif isinstance(method, Inference):
inference = method
else:
raise TypeError("method should be one of %s " "or Inference instance" % set(_select.keys()))
return inference.fit(n, **kwargs)
| def fit(
n=10000,
local_rv=None,
method="advi",
model=None,
random_seed=None,
start=None,
inf_kwargs=None,
**kwargs
):
r"""Handy shortcut for using inference methods in functional way
Parameters
----------
n : `int`
number of iterations
local_rv : dict[var->tuple]
mapping {model_variable -> approx params}
Local Vars are used for Autoencoding Variational Bayes
See (AEVB; Kingma and Welling, 2014) for details
method : str or :class:`Inference`
string name is case insensitive in:
- 'advi' for ADVI
- 'fullrank_advi' for FullRankADVI
- 'svgd' for Stein Variational Gradient Descent
- 'asvgd' for Amortized Stein Variational Gradient Descent
- 'nfvi' for Normalizing Flow with default `scale-loc` flow
- 'nfvi=<formula>' for Normalizing Flow using formula
model : :class:`Model`
PyMC3 model for inference
random_seed : None or int
leave None to use package global RandomStream or other
valid value to create instance specific one
inf_kwargs : dict
additional kwargs passed to :class:`Inference`
start : `Point`
starting point for inference
Other Parameters
----------------
score : bool
evaluate loss on each iteration or not
callbacks : list[function : (Approximation, losses, i) -> None]
calls provided functions after each iteration step
progressbar : bool
whether to show progressbar or not
obj_n_mc : `int`
Number of monte carlo samples used for approximation of objective gradients
tf_n_mc : `int`
Number of monte carlo samples used for approximation of test function gradients
obj_optimizer : function (grads, params) -> updates
Optimizer that is used for objective params
test_optimizer : function (grads, params) -> updates
Optimizer that is used for test function params
more_obj_params : `list`
Add custom params for objective optimizer
more_tf_params : `list`
Add custom params for test function optimizer
more_updates : `dict`
Add custom updates to resulting updates
total_grad_norm_constraint : `float`
Bounds gradient norm, prevents exploding gradient problem
fn_kwargs : `dict`
Add kwargs to theano.function (e.g. `{'profile': True}`)
more_replacements : `dict`
Apply custom replacements before calculating gradients
Returns
-------
:class:`Approximation`
"""
if inf_kwargs is None:
inf_kwargs = dict()
else:
inf_kwargs = inf_kwargs.copy()
if local_rv is not None:
inf_kwargs["local_rv"] = local_rv
if random_seed is not None:
inf_kwargs["random_seed"] = random_seed
if start is not None:
inf_kwargs["start"] = start
if model is None:
model = pm.modelcontext(model)
_select = dict(advi=ADVI, fullrank_advi=FullRankADVI, svgd=SVGD, asvgd=ASVGD, nfvi=NFVI)
if isinstance(method, str):
method = method.lower()
if method.startswith("nfvi="):
formula = method[5:]
inference = NFVI(formula, **inf_kwargs)
elif method in _select:
inference = _select[method](model=model, **inf_kwargs)
else:
raise KeyError(
"method should be one of %s or Inference instance" % set(_select.keys())
)
elif isinstance(method, Inference):
inference = method
else:
raise TypeError("method should be one of %s " "or Inference instance" % set(_select.keys()))
return inference.fit(n, **kwargs)
|
41,511 | def tmu_tilde(mu, data, pdf, init_pars, par_bounds, fixed_vals):
r"""
The test statistic, :math:`\tilde{t}_{\mu}`, for establishing a two-sided
interval on the strength parameter, :math:`\mu`, for models with
bounded POI, as defined in Equation (11) in :xref:`arXiv:1007.1727`
.. math::
\tilde{t}_{\mu} = -2\ln\tilde{\lambda}\left(\mu\right)
where :math:`\tilde{\lambda}\left(\mu\right)` is the constrained profile likelihood ratio as defined in Equation (10)
.. math::
:nowrap:
\begin{equation}
\tilde{\lambda}\left(\mu\right) = \left\{\begin{array}{ll}
\frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}(\mu)\right)}{L\left(\hat{\mu}, \hat{\hat{\boldsymbol{\theta}}}(0)\right)}, &\hat{\mu} < 0,\\
\frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}(\mu)\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}, &\hat{\mu} \geq 0.
\end{array}\right.
\end{equation}
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> fixed_vals = []
>>> pyhf.infer.test_statistics.tmu_tilde(test_mu, data, model, init_pars, par_bounds, [])
array(3.93824492)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_vals (`list`): Parameters held constant in the fit
Returns:
Float: The calculated test statistic, :math:`\tilde{t}_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu_tilde test statistic used for fit configuration with POI not bounded at zero.\n'
+ 'Use the tmu test statistic (pyhf.infer.test_statistics.tmu) instead.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals)
| def tmu_tilde(mu, data, pdf, init_pars, par_bounds, fixed_vals):
r"""
The test statistic, :math:`\tilde{t}_{\mu}`, for establishing a two-sided
interval on the strength parameter, :math:`\mu`, for models with
bounded POI, as defined in Equation (11) in :xref:`arXiv:1007.1727`
.. math::
\tilde{t}_{\mu} = -2\ln\tilde{\lambda}\left(\mu\right)
where :math:`\tilde{\lambda}\left(\mu\right)` is the constrained profile likelihood ratio as defined in Equation (10)
.. math::
:nowrap:
\begin{equation}
\tilde{\lambda}\left(\mu\right) = \left\{\begin{array}{ll}
\frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}(\mu)\right)}{L\left(\hat{\mu}, \hat{\hat{\boldsymbol{\theta}}}(0)\right)}, &\hat{\mu} < 0,\\
\frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}(\mu)\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}, &\hat{\mu} \geq 0.
\end{array}\right.
\end{equation}
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> fixed_vals = []
>>> pyhf.infer.test_statistics.tmu_tilde(test_mu, data, model, init_pars, par_bounds, fixed_vals)
array(3.93824492)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_vals (`list`): Parameters held constant in the fit
Returns:
Float: The calculated test statistic, :math:`\tilde{t}_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu_tilde test statistic used for fit configuration with POI not bounded at zero.\n'
+ 'Use the tmu test statistic (pyhf.infer.test_statistics.tmu) instead.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals)
|
13,245 | def find_bad_templates(self):
pt_errors = {}
html_output = ''
pts = self.ZopeFind(self,
obj_metatypes=('Page Template',
'Filesystem Page Template'),
search_sub=True)
for (pt_path, pt) in pts:
if not pt_path.startswith('/'):
pt_path = '/%s' % pt_path
try:
pt.pt_macros()
except PTRuntimeError:
# html quote "<" characters to be displayed as such
errs = [err.replace('<', '<') for err in pt._v_errors]
pt_errors[pt_path] = errs
for pt_path in sorted(pt_errors.keys()):
html_output += ERROR_HTML % {'pt_path': pt_path,
'broken': '\n\n'.join(pt_errors[pt_path])}
return RESULT_HTML % {'count': len(pt_errors),
'total': len(pts),
'error_html': html_output}
| def find_bad_templates(self):
pt_errors = {}
html_output = ''
pts = self.ZopeFind(self,
obj_metatypes=('Page Template',
'Filesystem Page Template'),
search_sub=True)
for (pt_path, pt) in pts:
if not pt_path.startswith('/'):
pt_path = '/%s' % pt_path
try:
pt.pt_macros()
except PTRuntimeError:
# html quote "<" characters to be displayed as such
pt_errors[pt_path] = [err.replace('<', '<') for err in pt._v_errors]
for pt_path in sorted(pt_errors.keys()):
html_output += ERROR_HTML % {'pt_path': pt_path,
'broken': '\n\n'.join(pt_errors[pt_path])}
return RESULT_HTML % {'count': len(pt_errors),
'total': len(pts),
'error_html': html_output}
|
23,063 | def concat(
dfs,
axis=0,
join="outer",
interleave_partitions=False,
ignore_unknown_divisions=False,
):
""" Concatenate DataFrames along rows.
- When axis=0 (default), concatenate DataFrames row-wise:
- If all divisions are known and ordered, concatenate DataFrames keeping
divisions. When divisions are not ordered, specifying
interleave_partition=True allows concatenate divisions each by each.
- If any of division is unknown, concatenate DataFrames resetting its
division to unknown (None)
- When axis=1, concatenate DataFrames column-wise:
- Allowed if all divisions are known.
- If any of division is unknown, it raises ValueError.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
Whether to concatenate DataFrames ignoring its order. If True, every
divisions are concatenated each by each.
ignore_unknown_divisions: boolean, default False
Whether to warn when concatenating dask series/dataframes
with unknown divisions. If True the warning won't be emitted.
Notes
-----
This differs from ``pd.concat`` when concatenating Categoricals
with different categories. Pandas currently coerces those to objects
before concatenating. Coercing to objects is very expensive for large
arrays, so dask preserves the Categoricals by taking the union of
the categories.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
ValueError: All inputs have known divisions which cannot be concatenated
in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
If any of division is unknown, the result division will be unknown
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
If we set ignore_unknown_divisions=True, the warning won't be emitted
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
Different categoricals are unioned
>> dd.concat([ # doctest: +SKIP
... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
... ], interleave_partitions=True).dtype
CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)
"""
if not isinstance(dfs, list):
raise TypeError("dfs must be a list of DataFrames/Series objects")
if len(dfs) == 0:
raise ValueError("No objects to concatenate")
if len(dfs) == 1:
if axis == 1 and isinstance(dfs[0], Series):
return dfs[0].to_frame()
else:
return dfs[0]
if join not in ("inner", "outer"):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
dfs = _maybe_from_pandas(dfs)
if axis == 1:
if all(df.known_divisions for df in dasks):
return concat_indexed_dataframes(dfs, axis=axis, join=join)
elif (
len(dasks) == len(dfs)
and all(not df.known_divisions for df in dfs)
and len({df.npartitions for df in dasks}) == 1
):
if not ignore_unknown_divisions:
warnings.warn(
"Concatenating dataframes with unknown divisions.\n"
"We're assuming that the indexes of each dataframes"
" are \n aligned. This assumption is not generally "
"safe."
)
return concat_unindexed_dataframes(dfs)
else:
raise ValueError(
"Unable to concatenate DataFrame with unknown "
"division specifying axis=1"
)
else:
if all(df.known_divisions for df in dasks):
# each DataFrame's division must be greater than previous one
if all(
dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)
):
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return stack_partitions(dfs, divisions, join=join)
elif interleave_partitions:
return concat_indexed_dataframes(dfs, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
| def concat(
dfs,
axis=0,
join="outer",
interleave_partitions=False,
ignore_unknown_divisions=False,
):
""" Concatenate DataFrames along rows.
- When axis=0 (default), concatenate DataFrames row-wise:
- If all divisions are known and ordered, concatenate DataFrames keeping
divisions. When divisions are not ordered, specifying
interleave_partition=True allows concatenate divisions each by each.
- If any of division is unknown, concatenate DataFrames resetting its
division to unknown (None)
- When axis=1, concatenate DataFrames column-wise:
- Allowed if all divisions are known.
- If any of division is unknown, it raises ValueError.
Parameters
----------
dfs : list
List of dask.DataFrames to be concatenated
axis : {0, 1, 'index', 'columns'}, default 0
The axis to concatenate along
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis
interleave_partitions : bool, default False
Whether to concatenate DataFrames ignoring its order. If True, every
divisions are concatenated each by each.
ignore_unknown_divisions : bool, default False
Whether to warn when concatenating dask series/dataframes
with unknown divisions. If True the warning won't be emitted.
Notes
-----
This differs from ``pd.concat`` when concatenating Categoricals
with different categories. Pandas currently coerces those to objects
before concatenating. Coercing to objects is very expensive for large
arrays, so dask preserves the Categoricals by taking the union of
the categories.
Examples
--------
If all divisions are known and ordered, divisions are kept.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(6, 8, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 3, 6, 8, 10)>
Unable to concatenate if divisions are not ordered.
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(1, 3, 5)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(2, 3, 6)>
>>> dd.concat([a, b]) # doctest: +SKIP
ValueError: All inputs have known divisions which cannot be concatenated
in order. Specify interleave_partitions=True to ignore order
Specify interleave_partitions=True to ignore the division order.
>>> dd.concat([a, b], interleave_partitions=True) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(1, 2, 3, 5, 6)>
If any of division is unknown, the result division will be unknown
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b]) # doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
If we set ignore_unknown_divisions=True, the warning won't be emitted
>>> a # doctest: +SKIP
dd.DataFrame<x, divisions=(None, None)>
>>> b # doctest: +SKIP
dd.DataFrame<y, divisions=(1, 4, 10)>
>>> dd.concat([a, b], ignore_unknown_divisions=True)# doctest: +SKIP
dd.DataFrame<concat-..., divisions=(None, None, None, None)>
Different categoricals are unioned
>> dd.concat([ # doctest: +SKIP
... dd.from_pandas(pd.Series(['a', 'b'], dtype='category'), 1),
... dd.from_pandas(pd.Series(['a', 'c'], dtype='category'), 1),
... ], interleave_partitions=True).dtype
CategoricalDtype(categories=['a', 'b', 'c'], ordered=False)
"""
if not isinstance(dfs, list):
raise TypeError("dfs must be a list of DataFrames/Series objects")
if len(dfs) == 0:
raise ValueError("No objects to concatenate")
if len(dfs) == 1:
if axis == 1 and isinstance(dfs[0], Series):
return dfs[0].to_frame()
else:
return dfs[0]
if join not in ("inner", "outer"):
raise ValueError("'join' must be 'inner' or 'outer'")
axis = DataFrame._validate_axis(axis)
dasks = [df for df in dfs if isinstance(df, _Frame)]
dfs = _maybe_from_pandas(dfs)
if axis == 1:
if all(df.known_divisions for df in dasks):
return concat_indexed_dataframes(dfs, axis=axis, join=join)
elif (
len(dasks) == len(dfs)
and all(not df.known_divisions for df in dfs)
and len({df.npartitions for df in dasks}) == 1
):
if not ignore_unknown_divisions:
warnings.warn(
"Concatenating dataframes with unknown divisions.\n"
"We're assuming that the indexes of each dataframes"
" are \n aligned. This assumption is not generally "
"safe."
)
return concat_unindexed_dataframes(dfs)
else:
raise ValueError(
"Unable to concatenate DataFrame with unknown "
"division specifying axis=1"
)
else:
if all(df.known_divisions for df in dasks):
# each DataFrame's division must be greater than previous one
if all(
dfs[i].divisions[-1] < dfs[i + 1].divisions[0]
for i in range(len(dfs) - 1)
):
divisions = []
for df in dfs[:-1]:
# remove last to concatenate with next
divisions += df.divisions[:-1]
divisions += dfs[-1].divisions
return stack_partitions(dfs, divisions, join=join)
elif interleave_partitions:
return concat_indexed_dataframes(dfs, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
else:
divisions = [None] * (sum([df.npartitions for df in dfs]) + 1)
return stack_partitions(dfs, divisions, join=join)
|
31,103 | def create_alert_output(item: Dict, table_headers: List[str]) -> Dict[str, Optional[Any]]:
"""
Create the complete output dictionary for an alert.
Args:
item (dict): A source dictionary from the API response.
table_headers (list(str)): The table headers to be used when creating initial data.
Returns:
object_data (dict(str)): The output dictionary.
"""
object_data = {field: item.get(field) for field in table_headers + ['groupKey', 'product']}
managed_agent = item.get('managedAgent')
if managed_agent:
object_data['managedAgentId'] = managed_agent.get('id')
object_data['managedAgentType'] = managed_agent.get('type')
tenant = item.get('tenant')
if tenant:
object_data['tenantId'] = tenant.get('id')
object_data['tenantName'] = tenant.get('name')
person = item.get('person')
if person:
object_data['person'] = person.get('id')
return object_data
| def create_alert_output(item: Dict, table_headers: List[str]) -> Dict[str, Optional[Any]]:
"""
Create the complete output dictionary for an alert.
Args:
item (dict): A source dictionary from the API response.
table_headers (list(str)): The table headers to be used when creating initial data.
Returns:
object_data (dict(str)): The output dictionary.
"""
alert_data = {field: item.get(field) for field in table_headers + ['groupKey', 'product']}
managed_agent = item.get('managedAgent')
if managed_agent:
object_data['managedAgentId'] = managed_agent.get('id')
object_data['managedAgentType'] = managed_agent.get('type')
tenant = item.get('tenant')
if tenant:
object_data['tenantId'] = tenant.get('id')
object_data['tenantName'] = tenant.get('name')
person = item.get('person')
if person:
object_data['person'] = person.get('id')
return object_data
|
31,734 | def get_no_update_value(response: requests.Response) -> bool:
"""
detect if the feed response has been modified according to the headers etag and last_modified.
For more information, see this:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
response: (requests.Response) The feed response.
Returns:
boolean with the value for noUpdate argument.
The value should be False if the response was modified.
"""
context = demisto.getIntegrationContext()
old_etag = context.get('etag')
old_last_modified = context.get('last_modified')
etag = response.headers.get('ETag')
last_modified = response.headers.get('Last-Modified')
demisto.setIntegrationContext({'last_modified': last_modified, 'etag': etag})
if old_etag and old_etag != etag:
return False
if old_last_modified and old_last_modified != last_modified:
return False
return True
| def get_no_update_value(response: requests.Response) -> bool:
"""
detect if the feed response has been modified according to the headers etag and last_modified.
For more information, see this:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag
Args:
response: (requests.Response) The feed response.
Returns:
boolean with the value for noUpdate argument.
The value should be False if the response was modified.
"""
context = get_integration_context()
old_etag = context.get('etag')
old_last_modified = context.get('last_modified')
etag = response.headers.get('ETag')
last_modified = response.headers.get('Last-Modified')
demisto.setIntegrationContext({'last_modified': last_modified, 'etag': etag})
if old_etag and old_etag != etag:
return False
if old_last_modified and old_last_modified != last_modified:
return False
return True
|
43,783 | def pauli_group_generator(n_qubits, wire_map=None):
"""Generator for iterating over the n-qubit Pauli group.
The :math:`n`-qubit Pauli group has size :math:`4^n`, thus it may not be desirable
to construct it in full and store. This function allows for iteration over elements
of the Pauli group with no storage involved.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
strings to Pauli operators using the ``binary_to_pauli`` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
(qml.Operation): The next Pauli word in the group.
**Example**
The ``pauli_group_generator`` can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.grouping.pauli_group import pauli_group_generator
n_qubits = 3
for p in pauli_group_generator(n_qubits):
print(p)
Alternatively, using a custom wire map,
.. code-block:: python
from pennylane.wires import Wires
from pennylane.grouping.pauli_group import pauli_group_generator
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group_generator(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
raise TypeError("Must specify an integer number of qubits construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
element_idx = 0
if not wire_map:
wire_map = {wire_idx: wire_idx for wire_idx in range(n_qubits)}
while element_idx < 4 ** n_qubits:
binary_string = format(element_idx, f"#0{2*n_qubits+2}b")[2:]
binary_vector = [float(b) for b in binary_string]
yield binary_to_pauli(binary_vector, wire_map=wire_map)
element_idx += 1
| def pauli_group_generator(n_qubits, wire_map=None):
"""Generator for iterating over the n-qubit Pauli group.
The :math:`n`-qubit Pauli group has size :math:`4^n`, thus it may not be desirable
to construct it in full and store. This function allows for iteration over elements
of the Pauli group with no storage involved.
The order of iteration is based on the binary symplectic representation of
the Pauli group as :math:`2n`-bit strings. Ordering is done by converting
the integers :math:`0` to :math:`2^{2n}` to binary strings, and converting those
strings to Pauli operators using the ``binary_to_pauli`` method.
Args:
n_qubits (int): The number of qubits for which to create the group.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels
used in the Pauli word as keys, and unique integer labels as their values.
If no wire map is provided, wires will be labeled by integers between 0 and ``n_qubits``.
Returns:
.Operation: The next Pauli word in the group.
**Example**
The ``pauli_group_generator`` can be used to loop over the Pauli group as follows:
.. code-block:: python
from pennylane.grouping.pauli_group import pauli_group_generator
n_qubits = 3
for p in pauli_group_generator(n_qubits):
print(p)
Alternatively, using a custom wire map,
.. code-block:: python
from pennylane.wires import Wires
from pennylane.grouping.pauli_group import pauli_group_generator
n_qubits = 3
wire_map = {'a' : 0, 'b' : 1, 'c' : 2}
for p in pauli_group_generator(n_qubits, wire_map=wire_map):
print(p)
"""
if not isinstance(n_qubits, int):
raise TypeError("Must specify an integer number of qubits construct the Pauli group.")
if n_qubits <= 0:
raise ValueError("Number of qubits must be at least 1 to construct Pauli group.")
element_idx = 0
if not wire_map:
wire_map = {wire_idx: wire_idx for wire_idx in range(n_qubits)}
while element_idx < 4 ** n_qubits:
binary_string = format(element_idx, f"#0{2*n_qubits+2}b")[2:]
binary_vector = [float(b) for b in binary_string]
yield binary_to_pauli(binary_vector, wire_map=wire_map)
element_idx += 1
|
280 | def sample_prior_predictive(samples=500,
model: Optional[Model]=None,
vars: Optional[TIterable[str]] = None,
var_names: Optional[TIterable[str]] = None,
random_seed=None) -> Dict[str, np.ndarray]:
"""Generate samples from the prior predictive distribution.
Parameters
----------
samples : int
Number of samples from the prior predictive to generate. Defaults to 500.
model : Model (optional if in `with` context)
vars : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. *DEPRECATED* - Use `var_names` argument instead.
var_names : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. Defaults to `model.named_vars`.
random_seed : int
Seed for the random number generator.
Returns
-------
dict
Dictionary with variable names as keys. The values are numpy arrays of prior
samples.
"""
model = modelcontext(model)
if vars is None and var_names is None:
vars = set(model.named_vars.keys())
vars_ = model.named_vars
elif vars is None:
vars = var_names
vars_ = vars
elif vars is not None:
warnings.warn("vars argument is deprecated in favor of var_names.",
DeprecationWarning)
vars_ = vars
else:
raise ValueError("Cannot supply both vars and var_names arguments.")
vars = cast(TIterable[str], vars) # tell mypy that vars cannot be None here.
if random_seed is not None:
np.random.seed(random_seed)
names = get_default_varnames(vars_, include_transformed=False)
# draw_values fails with auto-transformed variables. transform them later!
values = draw_values([model[name] for name in names], size=samples)
data = {k: v for k, v in zip(names, values)}
if data is None:
raise AssertionError("No variables sampled: attempting to sample %s"%names)
prior = {} # type: Dict[str, np.ndarray]
for var_name in vars:
if var_name in data:
prior[var_name] = data[var_name]
elif is_transformed_name(var_name):
untransformed = get_untransformed_name(var_name)
if untransformed in data:
prior[var_name] = model[untransformed].transformation.forward_val(
data[untransformed])
return prior
| def sample_prior_predictive(samples=500,
model: Optional[Model]=None,
vars: Optional[TIterable[str]] = None,
var_names: Optional[TIterable[str]] = None,
random_seed=None) -> Dict[str, np.ndarray]:
"""Generate samples from the prior predictive distribution.
Parameters
----------
samples : int
Number of samples from the prior predictive to generate. Defaults to 500.
model : Model (optional if in `with` context)
vars : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. *DEPRECATED* - Use ``var_names`` argument instead.
var_names : Iterable[str]
A list of names of variables for which to compute the posterior predictive
samples. Defaults to `model.named_vars`.
random_seed : int
Seed for the random number generator.
Returns
-------
dict
Dictionary with variable names as keys. The values are numpy arrays of prior
samples.
"""
model = modelcontext(model)
if vars is None and var_names is None:
vars = set(model.named_vars.keys())
vars_ = model.named_vars
elif vars is None:
vars = var_names
vars_ = vars
elif vars is not None:
warnings.warn("vars argument is deprecated in favor of var_names.",
DeprecationWarning)
vars_ = vars
else:
raise ValueError("Cannot supply both vars and var_names arguments.")
vars = cast(TIterable[str], vars) # tell mypy that vars cannot be None here.
if random_seed is not None:
np.random.seed(random_seed)
names = get_default_varnames(vars_, include_transformed=False)
# draw_values fails with auto-transformed variables. transform them later!
values = draw_values([model[name] for name in names], size=samples)
data = {k: v for k, v in zip(names, values)}
if data is None:
raise AssertionError("No variables sampled: attempting to sample %s"%names)
prior = {} # type: Dict[str, np.ndarray]
for var_name in vars:
if var_name in data:
prior[var_name] = data[var_name]
elif is_transformed_name(var_name):
untransformed = get_untransformed_name(var_name)
if untransformed in data:
prior[var_name] = model[untransformed].transformation.forward_val(
data[untransformed])
return prior
|
14,044 | def _delegate_binary_method(op, this, other, align, *args, **kwargs):
# type: (str, GeoSeries, GeoSeries) -> GeoSeries/Series
this = this.geometry
if isinstance(other, GeoPandasBase):
if align and not this.index.equals(other.index):
warn("The indices of the two GeoSeries are different.")
this, other = this.align(other.geometry)
else:
other = other.geometry
a_this = GeometryArray(this.values)
other = GeometryArray(other.values)
elif isinstance(other, BaseGeometry):
a_this = GeometryArray(this.values)
else:
raise TypeError(type(this), type(other))
data = getattr(a_this, op)(other, *args, **kwargs)
return data, this.index
| def _delegate_binary_method(op, this, other, align, *args, **kwargs):
# type: (str, GeoSeries, GeoSeries) -> GeoSeries/Series
this = this.geometry
if isinstance(other, GeoPandasBase):
if align is None and not this.index.equals(other.index):
warn("The indices of the two GeoSeries are different.")
this, other = this.align(other.geometry)
else:
other = other.geometry
a_this = GeometryArray(this.values)
other = GeometryArray(other.values)
elif isinstance(other, BaseGeometry):
a_this = GeometryArray(this.values)
else:
raise TypeError(type(this), type(other))
data = getattr(a_this, op)(other, *args, **kwargs)
return data, this.index
|
54,296 | def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing a two-sided
interval on the strength parameter, :math:`\mu`, for models with
bounded POI.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu tilde test statistic used for fit configuration with POI not bounded at zero. Use tmu.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
| def tmu_tilde(mu, data, pdf, init_pars, par_bounds):
r"""
The test statistic, :math:`t_{\mu}`, for establishing a two-sided
interval on the strength parameter, :math:`\mu`, for models with
bounded POI.
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] != 0:
log.warning(
'tmu_tilde test statistic used for fit configuration with POI not bounded at zero.\n'
+ 'Use the tmu test statistic (pyhf.infer.test_statistics.tmu) instead.'
)
return _tmu_like(mu, data, pdf, init_pars, par_bounds)
|
30,733 | def close_alerts(args, close_action, client=CLIENT):
readable_close_action = "closed_as_security" if close_action == "delete_rules" else "closed_as_change"
human_readable = f'Command changes the status of alerts passed as "{readable_close_action}" in Nozomi Networks platform.'
extracted_ids = ids_from_args(args)
response = client.http_post_request(
'/api/open/alerts/close',
{"ids": extracted_ids, "close_action": close_action})
result = 'SUCCESS' if wait_for_job_result(response['result']['id'], 'close', client) else 'FAIL'
return {
'outputs': result,
'outputs_prefix': 'Nozomi.Ids',
'outputs_key_field': '',
'readable_output': human_readable
}
| def close_alerts(args, close_action, client=CLIENT):
readable_close_action = "closed_as_security" if close_action == "delete_rules" else "closed_as_change"
human_readable = f'Command changes the status of the following alerts: {ids} passed as "{readable_close_action}" in Nozomi Networks platform.'
extracted_ids = ids_from_args(args)
response = client.http_post_request(
'/api/open/alerts/close',
{"ids": extracted_ids, "close_action": close_action})
result = 'SUCCESS' if wait_for_job_result(response['result']['id'], 'close', client) else 'FAIL'
return {
'outputs': result,
'outputs_prefix': 'Nozomi.Ids',
'outputs_key_field': '',
'readable_output': human_readable
}
|
6,822 | def execute():
__usersettings = frappe.db.sql("""SELECT * FROM __UserSettings""", as_dict=True)
user_settings_fields = ["name", "user", "document_type", "last_view", "updated_on"]
user_settings = []
user_view_settings_fields = ["name", "view", "sort_by", "sort_order", "document_type", "filters", "fields",
"parenttype", "parent", "parentfield", "creation", "modified", "modified_by"]
user_view_settings = []
datetime = frappe.utils.get_datetime_str(frappe.utils.now_datetime())
for _usersettings in __usersettings:
settings = json.loads(_usersettings.get("data"))
name = "{0}-{1}".format(_usersettings.get("doctype"), _usersettings.get("user"))
user_settings.append((
name,
_usersettings.get("user"),
_usersettings.get("doctype"),
settings.get("last_view"),
settings.get("created_on")
))
for view in ['List', 'Gantt', 'Kanban', 'Calendar', 'Image', 'Inbox', 'Report']:
if not settings.get(view):
continue
view_data = settings.get(view)
if isinstance(view_data, string_types):
view_data = json.loads(view_data)
user_view_settings.append((
frappe.generate_hash(view+name+_usersettings.get("user")+_usersettings.get("doctype")),
view,
view_data.get("sort_by"),
view_data.get("sort_order"),
_usersettings.get("doctype"),
json.dumps(view_data.get("filters")),
json.dumps(view_data.get("fields")),
"User Settings",
name,
"views",
datetime,
datetime,
"Administrator"
))
frappe.db.bulk_insert("User Settings", user_settings_fields, user_settings)
frappe.db.bulk_insert("User View Settings", user_view_settings_fields, user_view_settings, True) | def execute():
__usersettings = frappe.db.sql("""SELECT * FROM __UserSettings""", as_dict=True)
user_settings_fields = ["name", "user", "document_type", "last_view", "updated_on"]
user_settings = []
user_view_settings_fields = ["name", "view", "sort_by", "sort_order", "document_type", "filters", "fields",
"parenttype", "parent", "parentfield", "creation", "modified", "modified_by"]
user_view_settings = []
datetime = frappe.utils.get_datetime_str(frappe.utils.now_datetime())
for _usersettings in __usersettings:
settings = json.loads(_usersettings.get("data"))
name = "{0}-{1}".format(_usersettings.get("doctype"), _usersettings.get("user"))
user_settings.append((
name,
_usersettings.get("user"),
_usersettings.get("doctype"),
settings.get("last_view"),
settings.get("created_on")
))
for view in ['List', 'Gantt', 'Kanban', 'Calendar', 'Image', 'Inbox', 'Report']:
if not settings.get(view):
continue
view_data = settings.get(view)
if isinstance(view_data, string_types):
view_data = json.loads(view_data)
user_view_settings.append((
frappe.generate_hash(view + name + _usersettings.get("user") + _usersettings.get("doctype")),
view,
view_data.get("sort_by"),
view_data.get("sort_order"),
_usersettings.get("doctype"),
json.dumps(view_data.get("filters")),
json.dumps(view_data.get("fields")),
"User Settings",
name,
"views",
datetime,
datetime,
"Administrator"
))
frappe.db.bulk_insert("User Settings", user_settings_fields, user_settings)
frappe.db.bulk_insert("User View Settings", user_view_settings_fields, user_view_settings, True) |
11,747 | def get_openwrt_platform():
"""
Add this workaround for detecting OpenWRT products because
the version and product information is contained in the /etc/openwrt_release file.
"""
result = [None, None, None]
openwrt_version = re.compile("^DISTRIB_RELEASE=['\"](\d+\.\d+.\d+)['\"]")
openwrt_product = re.compile("^DISTRIB_ID=['\"]([\w-]+)['\"]")
with open('/etc/openwrt_release', 'r') as fh:
content = fh.readlines()
for line in content:
version_matches = openwrt_version.match(line)
product_matches = openwrt_product.match(line)
if version_matches:
result[1] = version_matches.group(1)
elif product_matches:
if product_matches.group(1) == "OpenWrt":
result[0] = "openwrt"
return result | def get_openwrt_platform():
"""
Add this workaround for detecting OpenWRT products because
the version and product information is contained in the /etc/openwrt_release file.
"""
result = [None, None, None]
openwrt_version = re.compile(r"^DISTRIB_RELEASE=['\"](\d+\.\d+.\d+)['\"]")
openwrt_product = re.compile("^DISTRIB_ID=['\"]([\w-]+)['\"]")
with open('/etc/openwrt_release', 'r') as fh:
content = fh.readlines()
for line in content:
version_matches = openwrt_version.match(line)
product_matches = openwrt_product.match(line)
if version_matches:
result[1] = version_matches.group(1)
elif product_matches:
if product_matches.group(1) == "OpenWrt":
result[0] = "openwrt"
return result |
48,988 | def test_workload_identity_federation_aws(capsys: CaptureFixture) -> None:
project_id = os.environ['GOOGLE_CLOUD_PROJECT']
# Replace the below variables with your AWS EC2 credentials.
aws_access_key_id = "AKIA000000000EXAMPLE"
aws_secret_access_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
workload_identity_federation.create_token_aws(project_id, "provider_id", "pool_id", aws_access_key_id,
aws_secret_access_key)
out, _ = capsys.readouterr()
assert re.search("URL encoded token:", out)
| def test_workload_identity_federation_aws(capsys: CaptureFixture) -> None:
import google.auth
credentials, project_id = google.auth.default()
# Replace the below variables with your AWS EC2 credentials.
aws_access_key_id = "AKIA000000000EXAMPLE"
aws_secret_access_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
workload_identity_federation.create_token_aws(project_id, "provider_id", "pool_id", aws_access_key_id,
aws_secret_access_key)
out, _ = capsys.readouterr()
assert re.search("URL encoded token:", out)
|
32,635 | def fetch_incidents(args: dict, params: dict):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = params.get('first_fetch')
attribute_key = params.get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = params.get('AttributeValue')
fetch_limit = int(params.get('fetch_limit'))
if fetch_limit > 50 or fetch_limit <= 0:
fetch_limit = 50
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug("Fetch start time")
demisto.debug(str(fetch_start_time))
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(PaginationConfig={'MaxItems': fetch_limit}, **kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.debug("Last fetch time")
demisto.debug(str(last_fetch))
demisto.incidents(incidents)
| def fetch_incidents(args: dict, params: dict):
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
last_fetch = demisto.getLastRun()
first_fetch = params.get('first_fetch')
attribute_key = params.get('AttributeKey')
if not attribute_key:
attribute_key = 'EventName'
attribute_value = params.get('AttributeValue')
fetch_limit = int(params.get('max_fetch', 50))
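    # CloudTrail LookupEvents returns at most 50 results per call, so keep the limit within (0, 50].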
if fetch_limit > 50 or fetch_limit <= 0:
fetch_limit = 50
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug("Fetch start time")
demisto.debug(str(fetch_start_time))
incidents = []
incident_created_time = fetch_start_time
kwargs = {
'LookupAttributes': [{
'AttributeKey': attribute_key,
'AttributeValue': attribute_value
}]
}
kwargs.update({'StartTime': fetch_start_time})
client.lookup_events(**kwargs)
paginator = client.get_paginator('lookup_events')
for response in paginator.paginate(PaginationConfig={'MaxItems': fetch_limit}, **kwargs):
for i, event in enumerate(response['Events']):
incident = {
'EventId': event.get('EventId'),
'Name': event.get('EventName'),
'EventTime': handle_returning_date_to_string(event.get('EventTime', '01-01-01T00:00:00')),
'EventSource': event.get('EventSource'),
'ResourceName': event.get('Resources')[0].get('ResourceName') if event.get('Resources') else None,
'ResourceType': event.get('Resources')[0].get('ResourceType') if event.get('Resources') else None,
'CloudTrailEvent': event.get('CloudTrailEvent'),
'Username': event.get('Username'),
'rawJSON': json.dumps(event, indent=4, sort_keys=True, default=str)
}
incidents.append(incident)
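            # Move the checkpoint one second past this event so it is not fetched again on the next run.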
incident_created_time = (event.get('EventTime', '01-01-01T00:00:00') + timedelta(seconds=1)).timestamp()
if incident_created_time > fetch_start_time:
last_fetch = str(incident_created_time)
demisto.setLastRun(last_fetch)
demisto.debug("Last fetch time")
demisto.debug(str(last_fetch))
demisto.incidents(incidents)
|
39,195 | def compute_power_spectral_density_matrix(
specgram: Tensor,
mask: Optional[Tensor] = None,
normalize: bool = True,
eps: float = 1e-10,
) -> Tensor:
"""Compute cross-channel power spectral density (PSD) matrix.
Args:
specgram (Tensor): Multi-channel complex-valued spectrum.
Tensor of dimension `(..., channel, freq, time)`
mask (Tensor or None, optional): Real-valued Time-Frequency mask
for normalization. Tensor of dimension `(..., freq, time)`
(Default: ``None``)
normalize (bool, optional): whether normalize the mask along the time dimension.
eps (float, optional): a value added to the denominator in mask normalization. (Default: ``1e-10``)
Returns:
Tensor: The complex-valued PSD matrix of the input spectrum.
Tensor of dimension `(..., freq, channel, channel)`
"""
specgram = specgram.transpose(-3, -2) # shape (freq, channel, time)
# outer product:
# (..., ch_1, time) x (..., ch_2, time) -> (..., time, ch_1, ch_2)
psd = torch.einsum("...ct,...et->...tce", [specgram, specgram.conj()])
if mask is not None:
# Normalized mask along time dimension:
if normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)
psd = psd * mask[..., None, None]
psd = psd.sum(dim=-3)
return psd
| def compute_power_spectral_density_matrix(
specgram: Tensor,
mask: Optional[Tensor] = None,
normalize: bool = True,
eps: float = 1e-10,
) -> Tensor:
"""Compute cross-channel power spectral density (PSD) matrix.
Args:
specgram (Tensor): Multi-channel complex-valued spectrum.
Tensor of dimension `(..., channel, freq, time)`
mask (Tensor or None, optional): Real-valued Time-Frequency mask
for normalization. Tensor of dimension `(..., freq, time)`
(Default: ``None``)
        normalize (bool, optional): whether to normalize the mask along the time dimension. (Default: ``True``)
        eps (float, optional): a value added to the denominator in mask normalization. (Default: ``1e-10``)
Returns:
Tensor: The complex-valued PSD matrix of the input spectrum.
Tensor of dimension `(..., freq, channel, channel)`
"""
specgram = specgram.transpose(-3, -2) # shape (freq, channel, time)
# outer product:
# (..., ch_1, time) x (..., ch_2, time) -> (..., time, ch_1, ch_2)
psd = torch.einsum("...ct,...et->...tce", [specgram, specgram.conj()])
if mask is not None:
# Normalized mask along time dimension:
if normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)
psd = psd * mask[..., None, None]
psd = psd.sum(dim=-3)
return psd
|
52,749 | def _is_ntlm_message(message):
"""
Checks whether the given string is an NTLM message
"""
if message[:4].lower() == 'ntlm' and message.split(' ', 1)[1:]:
return True
if message[:9].lower() == 'negotiate':
message = message.split(' ', 1)
if message[1:]:
return utils.decode_b64(message[1].encode())[:7] == b'NTLMSSP'
return False
| def _is_ntlm_message(message):
"""
Checks whether the given string is an NTLM message
"""
if message[:4].lower() == 'ntlm' and message.split(' ', 1)[1:]:
return True
if message[:9].lower() == 'negotiate':
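        # A Negotiate header may wrap an NTLM token; its base64 payload decodes to bytes starting with b'NTLMSSP'.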
try:
return utils.decode_b64(message[9:].strip().encode())[:7] == b'NTLMSSP'
except (UnicodeDecodeError, TypeError):
pass
return False
|
6,242 | def checkSanity(urlTuple, kwargs):
"""
Check that all ssl environment is ok
"""
useCerts = False
certFile = ''
if "useCertificates" in kwargs and kwargs['useCertificates']:
certTuple = Locations.getHostCertificateAndKeyLocation()
if not certTuple:
gLogger.error("No cert/key found! ")
return S_ERROR("No cert/key found! ")
certFile = certTuple[0]
useCerts = True
elif "proxyString" in kwargs:
if (
six.PY2 and not isinstance(
kwargs['proxyString'],
six.string_types)) or (
six.PY3 and not isinstance(
kwargs['proxyString'],
bytes)):
gLogger.error("proxyString parameter is not a valid type", str(type(kwargs['proxyString'])))
return S_ERROR("proxyString parameter is not a valid type")
else:
if "proxyLocation" in kwargs:
certFile = kwargs["proxyLocation"]
else:
certFile = Locations.getProxyLocation()
if not certFile:
gLogger.error("No proxy found")
return S_ERROR("No proxy found")
elif not os.path.isfile(certFile):
gLogger.error("Proxy file does not exist", certFile)
return S_ERROR("%s proxy file does not exist" % certFile)
# For certs always check CA's. For clients skipServerIdentityCheck
if 'skipCACheck' not in kwargs or not kwargs['skipCACheck']:
if not Locations.getCAsLocation():
gLogger.error("No CAs found!")
return S_ERROR("No CAs found!")
if "proxyString" in kwargs:
certObj = X509Chain()
retVal = certObj.loadChainFromString(kwargs['proxyString'])
if not retVal['OK']:
gLogger.error("Can't load proxy string")
return S_ERROR("Can't load proxy string")
else:
if useCerts:
certObj = X509Certificate()
certObj.loadFromFile(certFile)
else:
certObj = X509Chain()
certObj.loadChainFromFile(certFile)
retVal = certObj.hasExpired()
if not retVal['OK']:
gLogger.error("Can't verify proxy or certificate file", "%s:%s" % (certFile, retVal['Message']))
return S_ERROR("Can't verify file %s:%s" % (certFile, retVal['Message']))
else:
if retVal['Value']:
notAfter = certObj.getNotAfterDate()
if notAfter['OK']:
notAfter = notAfter['Value']
else:
notAfter = "unknown"
gLogger.error("PEM file has expired", "%s is not valid after %s" % (certFile, notAfter))
return S_ERROR("PEM file %s has expired, not valid after %s" % (certFile, notAfter))
idDict = {}
retVal = certObj.getDIRACGroup(ignoreDefault=True)
if retVal['OK'] and retVal['Value'] is not False:
idDict['group'] = retVal['Value']
if useCerts:
idDict['DN'] = certObj.getSubjectDN()['Value']
else:
idDict['DN'] = certObj.getIssuerCert()['Value'].getSubjectDN()['Value']
return S_OK(idDict)
| def checkSanity(urlTuple, kwargs):
"""
Check that all ssl environment is ok
"""
useCerts = False
certFile = ''
if "useCertificates" in kwargs and kwargs['useCertificates']:
certTuple = Locations.getHostCertificateAndKeyLocation()
if not certTuple:
gLogger.error("No cert/key found! ")
return S_ERROR("No cert/key found! ")
certFile = certTuple[0]
useCerts = True
elif "proxyString" in kwargs:
if not isinstance(kwargs['proxyString'], six.string_types if six.PY2 else bytes):
gLogger.error("proxyString parameter is not a valid type", str(type(kwargs['proxyString'])))
return S_ERROR("proxyString parameter is not a valid type")
else:
if "proxyLocation" in kwargs:
certFile = kwargs["proxyLocation"]
else:
certFile = Locations.getProxyLocation()
if not certFile:
gLogger.error("No proxy found")
return S_ERROR("No proxy found")
elif not os.path.isfile(certFile):
gLogger.error("Proxy file does not exist", certFile)
return S_ERROR("%s proxy file does not exist" % certFile)
# For certs always check CA's. For clients skipServerIdentityCheck
if 'skipCACheck' not in kwargs or not kwargs['skipCACheck']:
if not Locations.getCAsLocation():
gLogger.error("No CAs found!")
return S_ERROR("No CAs found!")
if "proxyString" in kwargs:
certObj = X509Chain()
retVal = certObj.loadChainFromString(kwargs['proxyString'])
if not retVal['OK']:
gLogger.error("Can't load proxy string")
return S_ERROR("Can't load proxy string")
else:
if useCerts:
certObj = X509Certificate()
certObj.loadFromFile(certFile)
else:
certObj = X509Chain()
certObj.loadChainFromFile(certFile)
retVal = certObj.hasExpired()
if not retVal['OK']:
gLogger.error("Can't verify proxy or certificate file", "%s:%s" % (certFile, retVal['Message']))
return S_ERROR("Can't verify file %s:%s" % (certFile, retVal['Message']))
else:
if retVal['Value']:
notAfter = certObj.getNotAfterDate()
if notAfter['OK']:
notAfter = notAfter['Value']
else:
notAfter = "unknown"
gLogger.error("PEM file has expired", "%s is not valid after %s" % (certFile, notAfter))
return S_ERROR("PEM file %s has expired, not valid after %s" % (certFile, notAfter))
idDict = {}
retVal = certObj.getDIRACGroup(ignoreDefault=True)
if retVal['OK'] and retVal['Value'] is not False:
idDict['group'] = retVal['Value']
if useCerts:
idDict['DN'] = certObj.getSubjectDN()['Value']
else:
idDict['DN'] = certObj.getIssuerCert()['Value'].getSubjectDN()['Value']
return S_OK(idDict)
|
58,386 | def resolve_bot_file_id(file_id):
"""
Given a Bot API-style `file_id <telethon.tl.custom.file.File.id>`,
returns the media it represents. If the `file_id <telethon.tl.custom.file.File.id>`
is not valid, `None` is returned instead.
Note that the `file_id <telethon.tl.custom.file.File.id>` does not have information
such as image dimensions or file size, so these will be zero if present.
For thumbnails, the photo ID and hash will always be zero.
"""
if not file_id:
return None
# end if
if isinstance(file_id, str):
file = FileId.from_file_id(file_id)
elif not isinstance(file_id, FileId):
return None
# end if
if isinstance(file, DocumentFileId):
attributes = []
if file.type_id in (FileId.TYPE_VOICE, FileId.TYPE_AUDIO):
attributes.append(types.DocumentAttributeAudio(
duration=0,
voice=file.type_id == FileId.TYPE_VOICE
))
elif file.type_id == FileId.TYPE_VIDEO or file.type_id == FileId.TYPE_VIDEO_NOTE:
attributes.append(types.DocumentAttributeVideo(
duration=0,
w=0,
h=0,
round_message=file.type_id == FileId.TYPE_VIDEO_NOTE
))
# elif file.type_id == FieldId.TYPE_DOCUMENT: # other, cannot know which --> no attribute
elif file.type_id == FileId.TYPE_STICKER:
attributes.append(types.DocumentAttributeSticker(
alt='',
stickerset=types.InputStickerSetEmpty()
))
elif file.type_id == FileId.TYPE_ANIMATION:
attributes.append(types.DocumentAttributeAnimated())
# end if
return types.Document(
id=file.id,
access_hash=file.access_hash,
date=None,
mime_type='',
size=0,
thumbs=None,
dc_id=file.dc_id,
attributes=attributes,
file_reference=file.file_reference if file.has_reference else b''
)
elif not isinstance(file, PhotoFileId): # e.g. WebLocationFileId
return None
# end if
    # okay, so we're dealing with a photo.
assert isinstance(file, PhotoFileId)
photo_size = 's' if file.id or file.access_hash else 'x'
foto_size = file.photosize
if isinstance(foto_size, (PhotoFileId.PhotosizeSourceLegacy, PhotoFileId.PhotosizeSourceThumbnail, PhotoFileId.PhotosizeSourceDialogPhotoSmall, PhotoFileId.PhotosizeSourceDialogPhotoBig, PhotoFileId.PhotosizeSourceStickersetThumbnail)):
sizes = [
types.PhotoSize(
type=photo_size,
location=types.FileLocationToBeDeprecated(
volume_id=foto_size.volume_id,
local_id=foto_size.location_local_id,
),
w=0,
h=0,
size=0
)
]
else:
sizes = []
# end if
return types.Photo(
id=file.id,
access_hash=file.access_hash,
file_reference=file.file_reference,
date=None,
sizes=sizes,
dc_id=file.dc_id,
has_stickers=None
)
| def resolve_bot_file_id(file_id):
"""
Given a Bot API-style `file_id <telethon.tl.custom.file.File.id>`,
returns the media it represents. If the `file_id <telethon.tl.custom.file.File.id>`
is not valid, `None` is returned instead.
Note that the `file_id <telethon.tl.custom.file.File.id>` does not have information
such as image dimensions or file size, so these will be zero if present.
For thumbnails, the photo ID and hash will always be zero.
"""
if not file_id:
return None
# end if
if isinstance(file_id, str):
file = FileId.from_file_id(file_id)
elif not isinstance(file_id, FileId):
return None
# end if
if isinstance(file, DocumentFileId):
attributes = []
if file.type_id in (FileId.TYPE_VOICE, FileId.TYPE_AUDIO):
attributes.append(types.DocumentAttributeAudio(
duration=0,
voice=file.type_id == FileId.TYPE_VOICE
))
elif file.type_id == FileId.TYPE_VIDEO or file.type_id == FileId.TYPE_VIDEO_NOTE:
attributes.append(types.DocumentAttributeVideo(
duration=0,
w=0,
h=0,
round_message=file.type_id == FileId.TYPE_VIDEO_NOTE
))
# elif file.type_id == FieldId.TYPE_DOCUMENT: # other, cannot know which --> no attribute
elif file.type_id == FileId.TYPE_STICKER:
attributes.append(types.DocumentAttributeSticker(
alt='',
stickerset=types.InputStickerSetEmpty()
))
elif file.type_id == FileId.TYPE_ANIMATION:
attributes.append(types.DocumentAttributeAnimated())
# end if
return types.Document(
id=file.id,
access_hash=file.access_hash,
date=None,
mime_type='',
size=0,
thumbs=None,
dc_id=file.dc_id,
attributes=attributes,
file_reference=file.file_reference if file.has_reference else b''
)
elif not isinstance(file, PhotoFileId): # e.g. WebLocationFileId
return None
# end if
    # At this point ``file`` is guaranteed to be a PhotoFileId.
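    # Telegram photo-size type codes: 's' is the small box size, 'x' the large one.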
photo_size = 's' if file.id or file.access_hash else 'x'
foto_size = file.photosize
if isinstance(foto_size, (PhotoFileId.PhotosizeSourceLegacy, PhotoFileId.PhotosizeSourceThumbnail, PhotoFileId.PhotosizeSourceDialogPhotoSmall, PhotoFileId.PhotosizeSourceDialogPhotoBig, PhotoFileId.PhotosizeSourceStickersetThumbnail)):
sizes = [
types.PhotoSize(
type=photo_size,
location=types.FileLocationToBeDeprecated(
volume_id=foto_size.volume_id,
local_id=foto_size.location_local_id,
),
w=0,
h=0,
size=0
)
]
else:
sizes = []
# end if
return types.Photo(
id=file.id,
access_hash=file.access_hash,
file_reference=file.file_reference,
date=None,
sizes=sizes,
dc_id=file.dc_id,
has_stickers=None
)
|
23,287 | def copy_and_keep_build(site):
"""
Copies each site into the target location and keep last "n" builts as backups
"""
global error_count
for wiki in ALL_WIKIS:
if site=='common':
continue
        if site is not None and site != wiki:
continue
debug('copy: %s' % wiki)
targetdir = os.path.join(args.destdir, wiki)
debug("Creating temporary folders")
previousdir = os.path.join(args.backupdestdir)
debug('Recreating %s' % previousdir )
if not os.path.exists(previousdir):
os.mkdir(previousdir)
olddir = os.path.join(previousdir, str(building_time) )
debug('Recreating %s' % olddir )
if not os.path.exists(olddir):
os.mkdir(olddir)
if os.path.exists(targetdir):
debug('Moving %s into %s' % (targetdir,olddir) )
shutil.move(targetdir, olddir)
sourcedir='./%s/build/html/' % wiki
html_moved_dir = os.path.join(args.destdir, 'html')
try:
subprocess.check_call(['mv', sourcedir, html_moved_dir])
# Rename move! (single move to html/* failed)
subprocess.check_call(['mv', html_moved_dir ,targetdir])
debug("Moved to %s" % targetdir)
except:
error("FAIL moving output to %s" % targetdir)
finally:
debug("Creating a backup in %s" % olddir)
subprocess.check_call(['cp', '-r', targetdir ,olddir])
delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM)
| def copy_and_keep_build(site):
"""
Copies each site into the target location and keep last "n" builds as backups
"""
global error_count
for wiki in ALL_WIKIS:
if site=='common':
continue
        if site is not None and site != wiki:
continue
debug('copy: %s' % wiki)
targetdir = os.path.join(args.destdir, wiki)
debug("Creating temporary folders")
previousdir = os.path.join(args.backupdestdir)
debug('Recreating %s' % previousdir )
if not os.path.exists(previousdir):
os.mkdir(previousdir)
olddir = os.path.join(previousdir, str(building_time) )
debug('Recreating %s' % olddir )
if not os.path.exists(olddir):
os.mkdir(olddir)
if os.path.exists(targetdir):
debug('Moving %s into %s' % (targetdir,olddir) )
shutil.move(targetdir, olddir)
sourcedir='./%s/build/html/' % wiki
html_moved_dir = os.path.join(args.destdir, 'html')
try:
subprocess.check_call(['mv', sourcedir, html_moved_dir])
# Rename move! (single move to html/* failed)
subprocess.check_call(['mv', html_moved_dir ,targetdir])
debug("Moved to %s" % targetdir)
except:
error("FAIL moving output to %s" % targetdir)
finally:
debug("Creating a backup in %s" % olddir)
subprocess.check_call(['cp', '-r', targetdir ,olddir])
delete_old_wiki_backups(previousdir, N_BACKUPS_RETAIM)
|
58,053 | def get_mapping_fields_command(client: Client) -> GetMappingFieldsResponse:
"""
Returns the list of fields for an incident type.
Args:
client: Xsoar client to use
    returns: Dictionary with keys as field names
"""
incident_type_scheme = SchemeTypeMapping(type_name=client.ticket_type)
demisto.debug(f'Collecting incident mapping for incident type - "{client.ticket_type}"')
for field in TOPDESK_ARGS:
incident_type_scheme.add_field(field)
mapping_response = GetMappingFieldsResponse()
mapping_response.add_scheme_type(incident_type_scheme)
return mapping_response
| def get_mapping_fields_command(client: Client) -> GetMappingFieldsResponse:
"""
Returns the list of fields for an incident type.
Args:
client: Xsoar client to use
    returns: Dictionary with keys as field names
"""
incident_type_scheme = SchemeTypeMapping(type_name='TOPdesk Incident')
    demisto.debug('Collecting incident mapping for incident type - TOPdesk Incident')
for field in TOPDESK_ARGS:
incident_type_scheme.add_field(field)
mapping_response = GetMappingFieldsResponse()
mapping_response.add_scheme_type(incident_type_scheme)
return mapping_response
|
59,143 | def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
    `Plotly` and `ipywidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
    This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
| def sam(
adata: AnnData,
max_iter: int = 10,
num_norm_avg: int = 50,
k: int = 20,
distance: str = 'correlation',
standardization: Optional[str] = 'Normalizer',
weight_pcs: bool = True,
npcs: Optional[int] = None,
n_genes: Optional[int] = None,
projection: Optional[str] = 'umap',
inplace: bool = True,
verbose: bool = True,
) -> Optional[AnnData]:
"""\
Self-Assembling Manifolds single-cell RNA sequencing analysis tool.
SAM iteratively rescales the input gene expression matrix to emphasize
genes that are spatially variable along the intrinsic manifold of the data.
It outputs the gene weights, nearest neighbor matrix, and a 2D projection.
The AnnData input should contain unstandardized, non-negative values.
Preferably, the data should be log-normalized and no genes should be filtered out.
Parameters
----------
k - int, optional, default 20
The number of nearest neighbors to identify for each cell.
distance : string, optional, default 'correlation'
The distance metric to use when identifying nearest neighbors.
Can be any of the distance metrics supported by sklearn's 'pdist'.
max_iter - int, optional, default 10
The maximum number of iterations SAM will run.
projection - str, optional, default 'umap'
If 'tsne', generates a t-SNE embedding. If 'umap', generates a UMAP
embedding. Otherwise, no embedding will be generated.
standardization - str, optional, default 'Normalizer'
If 'Normalizer', use sklearn.preprocessing.Normalizer, which
normalizes expression data prior to PCA such that each cell has
unit L2 norm. If 'StandardScaler', use
sklearn.preprocessing.StandardScaler, which normalizes expression
data prior to PCA such that each gene has zero mean and unit
variance. Otherwise, do not normalize the expression data. We
recommend using 'StandardScaler' for large datasets with many
expected cell types and 'Normalizer' otherwise.
num_norm_avg - int, optional, default 50
The top 'num_norm_avg' dispersions are averaged to determine the
normalization factor when calculating the weights. This prevents
genes with large spatial dispersions from skewing the distribution
of weights.
weight_pcs - bool, optional, default True
If True, scale the principal components by their eigenvalues. In
datasets with many expected cell types, setting this to False might
improve the resolution as these cell types might be encoded by low-
variance principal components.
npcs - int, optional, default None,
Determines the number of top principal components selected at each
iteration of the SAM algorithm. If None, this number is chosen
automatically based on the size of the dataset. If weight_pcs is
set to True, this parameter primarily affects the runtime of the SAM
algorithm (more PCs = longer runtime).
n_genes - int, optional, default None:
Determines the number of top SAM-weighted genes to use at each iteration
of the SAM algorithm. If None, this number is chosen automatically
based on the size of the dataset. This parameter primarily affects
the runtime of the SAM algorithm (more genes = longer runtime).
inplace - bool, optional, default True:
Set fields in `adata` if True. Otherwise, returns a copy.
verbose - bool, optional, default True:
If True, displays SAM log statements.
Returns
-------
sam - SAM
The SAM object
adata - AnnData
`.var['weights']`
SAM weights for each gene.
`.var['spatial_dispersions']`
Spatial dispersions for each gene (these are used to compute the
SAM weights)
`.var['mask_genes']`
If preprocessed with SAM, this boolean vector indicates which genes
were filtered out (=False).
`.uns['preprocess_args']`
Dictionary of parameters used for preprocessing.
`.uns['run_args']`
Dictionary of parameters used for running SAM.
`.uns['pca_obj']`
The sklearn.decomposition.PCA object.
`.uns['X_processed']`
The standardized and SAM-weighted data fed into PCA.
`.uns['neighbors']`
A dictionary with key 'connectivities' containing the kNN adjacency
matrix output by SAM. If built-in scanpy dimensionality reduction
methods are to be used using the SAM-output AnnData, users
should recompute the neighbors using `.obs['X_pca']` with
`scanpy.pp.neighbors`.
`.uns['ranked_genes']`
Gene IDs ranked in descending order by their SAM weights.
`.obsm['X_pca']`
The principal components output by SAM.
`.obsm['X_umap']`
The UMAP projection output by SAM.
`.layers['X_disp']`
The expression matrix used for nearest-neighbor averaging.
`.layers['X_knn_avg']`
The nearest-neighbor-averaged expression data used for computing the
spatial dispersions of genes.
Example
-------
>>> import scanpy.external as sce
>>> import scanpy as sc
*** Running SAM ***
Assuming we are given an AnnData object called `adata`, we can run the SAM
algorithm as follows:
>>> sam,adata = sce.tl.SAM(adata,inplace=True)
The input AnnData object should contain unstandardized, non-negative
expression values. Preferably, the data should be log-normalized and no
genes should be filtered out.
Please see the documentation for a description of all available parameters.
For more detailed tutorials, please visit the original Github repository:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial
*** Plotting ***
To visualize the output, we can use the built-in `scatter` function (this
assumes that `matplotlib` is installed.)
>>> sam.scatter(projection = 'X_umap')
`scatter` accepts all keyword arguments used in the
`matplotlib.pyplot.scatter` function. Please visit the plotting tutorials
for more information:
https://github.com/atarashansky/self-assembling-manifold/tree/master/tutorial/SAM_Plotting
*** SAMGUI ***
SAM comes with the SAMGUI module, a graphical-user interface written with
    `Plotly` and `ipywidgets` for interactively exploring and annotating
the scRNAseq data and running SAM.
Dependencies can be installed with Anaconda by following the instructions in
the self-assembling-manifold Github README:
https://github.com/atarashansky/self-assembling-manifold
In a Jupyter notebook, execute the following to launch the interface:
>>> from SAMGUI import SAMGUI
>>> sam_gui = SAMGUI(sam) # sam is your SAM object
>>> sam_gui.SamPlot
    This can also be enabled in Jupyter Lab by following the instructions in the
self-assembling-manifold README.
"""
logg.info('Self-assembling manifold')
try:
from SAM import SAM
except ImportError:
raise ImportError(
'\nplease install sam-algorithm: \n\n'
'\tgit clone git://github.com/atarashansky/self-assembling-manifold.git\n'
'\tcd self-assembling-manifold\n'
'\tpip install .'
)
s = SAM(counts=adata, inplace=inplace)
logg.info('Running SAM')
s.run(
max_iter=max_iter,
num_norm_avg=num_norm_avg,
k=k,
distance=distance,
preprocessing=standardization,
weight_PCs=weight_pcs,
npcs=npcs,
n_genes=n_genes,
projection=projection,
verbose=verbose,
)
return (s, adata) if inplace else (s, s.adata)
|
38,798 | def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
temporary_registry = None
new_checks = []
for t in testcases:
if not t.check.is_fixture():
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
            # make sure that it will not be overwritten by a parent
# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
if temporary_registry is None:
temporary_registry = TestRegistry.create(nc, variant_num=i)
else:
temporary_registry.add(nc, variant_num=i)
if temporary_registry:
new_checks = temporary_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
| def distribute_tests(testcases, skip_system_check, skip_prgenv_check,
node_map):
temporary_registry = None
new_checks = []
    for t in testcases:
if not t.check.is_fixture():
cls = type(t.check)
basename = cls.__name__
original_var_info = cls.get_variant_info(
t.check.variant_num, recurse=True
)
def _rfm_distributed_set_run_nodes(obj):
if not obj.local:
obj.job.pin_nodes = obj._rfm_nodelist
def _rfm_distributed_set_build_nodes(obj):
if not obj.local and not obj.build_locally:
obj.build_job.pin_nodes = obj._rfm_nodelist
# We re-set the valid system and environment in a hook to
            # make sure that it will not be overwritten by a parent
# post-init hook
def _rfm_distributed_set_valid_sys_env(obj):
obj.valid_systems = [t._partition.fullname]
obj.valid_prog_environs = [t._environ.name]
class BaseTest(t.check.__class__):
_rfm_nodelist = builtins.parameter(node_map[t._partition.fullname])
valid_systems = [t._partition.fullname]
valid_prog_environs = [t._environ.name]
nc = make_test(
f'__D_{t._partition.name}_{t._environ.name}_{basename}',
(BaseTest, ),
{},
methods=[
builtins.run_before('run')(_rfm_distributed_set_run_nodes),
builtins.run_before('compile')(_rfm_distributed_set_build_nodes),
# TODO this hook is not working properly
# builtins.run_after('init')(_rfm_distributed_set_valid_sys_env),
]
)
# We have to set the prefix manually
nc._rfm_dynamic_test_prefix = t.check.prefix
for i in range(nc.num_variants):
# Check if this variant should be instantiated
var_info = copy.deepcopy(nc.get_variant_info(i, recurse=True))
var_info['params'].pop('_rfm_nodelist')
if var_info == original_var_info:
if temporary_registry is None:
temporary_registry = TestRegistry.create(nc, variant_num=i)
else:
temporary_registry.add(nc, variant_num=i)
if temporary_registry:
new_checks = temporary_registry.instantiate_all()
return generate_testcases(new_checks, skip_system_check,
skip_prgenv_check)
else:
return []
|
5,223 | def test_subclass_clear_cla():
# Ensure that subclasses of Axes call cla/clear correctly.
# Note, we cannot use mocking here as we want to be sure that the
# superclass fallback does not recurse.
class ClaAxes(Axes):
def cla(self):
nonlocal called
called = True
class ClaSuperAxes(Axes):
def cla(self):
nonlocal called
called = True
super().cla()
class ClearAxes(Axes):
def clear(self):
nonlocal called
called = True
class ClearSuperAxes(Axes):
def clear(self):
nonlocal called
called = True
super().clear()
fig = Figure()
for axes_class in [ClaAxes, ClearSuperAxes, ClearAxes, ClearSuperAxes]:
called = False
ax = axes_class(fig, [0, 0, 1, 1])
# Axes.__init__ has already called clear (which aliases to cla or is in
# the subclass).
assert called
called = False
ax.cla()
assert called
| def test_subclass_clear_cla():
# Ensure that subclasses of Axes call cla/clear correctly.
# Note, we cannot use mocking here as we want to be sure that the
# superclass fallback does not recurse.
class ClaAxes(Axes):
def cla(self):
nonlocal called
called = True
class ClaSuperAxes(Axes):
def cla(self):
nonlocal called
called = True
super().cla()
class ClearAxes(Axes):
def clear(self):
nonlocal called
called = True
class ClearSuperAxes(Axes):
def clear(self):
nonlocal called
called = True
super().clear()
fig = Figure()
for axes_class in [ClaAxes, ClaSuperAxes, ClearAxes, ClearSuperAxes]:
called = False
ax = axes_class(fig, [0, 0, 1, 1])
# Axes.__init__ has already called clear (which aliases to cla or is in
# the subclass).
assert called
called = False
ax.cla()
assert called
|
17,544 | def get_domain(var: xr.DataArray) -> str:
def check_sign(var):
if "cost" in var.name or var.name in ["resource", "loc_coordinates"]:
return ""
else:
return "NonNegative"
if var.dtype == bool:
return "Boolean"
elif is_numeric_dtype(var.dtype):
return check_sign(var) + "Reals"
else:
return "Any"
| def get_domain(var: xr.DataArray) -> str:
def check_sign(var):
if "cost" in var.name or var.name in ["resource", "loc_coordinates"]:
return ""
else:
return "NonNegative"
if var.dtype.kind == "b":
return "Boolean"
elif is_numeric_dtype(var.dtype):
return check_sign(var) + "Reals"
else:
return "Any"
|
46,270 | def _labeltypedict(key) -> ContextAction:
return {
'description': key,
'action': partial(_convert_dtype, mode=key),
'enable_when': 'not {}_label'.format(key),
'show_when': 'True'
}
| def _labeltypedict(key) -> ContextAction:
return {
'description': key,
'action': partial(_convert_dtype, mode=key),
'enable_when': f'only_labels_selected and active_layer_dtype != {key!r}',
'show_when': 'True'
}
|
43,368 | def poly_quad_expectations(mu, cov, wires, params, hbar=2.):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
mu (array): length-2 vector of means.
cov (array): :math:`2\times 2` covariance matrix.
wires (Sequence[int]): wires to calculate the expectation for.
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(I, \x_0, \p_0, \x_1, \p_1,\dots)`.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
tuple: contains the quadrature expectation and variance.
"""
Q = params[0]
N = len(mu)//2
# HACK, we need access to the Poly instance in order to expand the matrix!
op = qm.expval.PolyXP(Q, wires=wires, do_queue=False)
Q = op.heisenberg_obs(N)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2*A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2*N).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
| def poly_quad_expectations(mu, cov, wires, params, hbar=2.):
r"""Calculates the expectation and variance for an arbitrary
polynomial of quadrature operators.
Args:
mu (array): length-2 vector of means.
cov (array): :math:`2\times 2` covariance matrix.
wires (Sequence[int]): wires to calculate the expectation for.
params (array): a :math:`(2N+1)\times (2N+1)` array containing the linear
and quadratic coefficients of the quadrature operators
:math:`(I, \x_0, \p_0, \x_1, \p_1,\dots)`.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
tuple: contains the quadrature expectation and variance
"""
Q = params[0]
N = len(mu)//2
# HACK, we need access to the Poly instance in order to expand the matrix!
op = qm.expval.PolyXP(Q, wires=wires, do_queue=False)
Q = op.heisenberg_obs(N)
if Q.ndim == 1:
d = np.r_[Q[1::2], Q[2::2]]
return d.T @ mu + Q[0], d.T @ cov @ d
# convert to the (I, x1,x2,..., p1,p2...) ordering
M = np.vstack((Q[0:1, :], Q[1::2, :], Q[2::2, :]))
M = np.hstack((M[:, 0:1], M[:, 1::2], M[:, 2::2]))
d1 = M[1:, 0]
d2 = M[0, 1:]
A = M[1:, 1:]
d = d1 + d2
k = M[0, 0]
d2 = 2*A @ mu + d
k2 = mu.T @ A @ mu + mu.T @ d + k
ex = np.trace(A @ cov) + k2
var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2
modes = np.arange(2*N).reshape(2, -1).T
groenewald_correction = np.sum([np.linalg.det(hbar*A[:, m][n]) for m in modes for n in modes])
var -= groenewald_correction
return ex, var
|
36,344 | def b64encode(s, altchars=None, padded=True):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
If padded is True (the default), padding will be be applied to the
result bytes. If padding if False, no padding is applied.
"""
encoded = binascii.b2a_base64(s, newline=False)
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
encoded = encoded.translate(bytes.maketrans(b'+/', altchars))
if not padded:
encoded = encoded.rstrip(b'=')
return encoded
| def b64encode(s, altchars=None, padded=True):
"""Encode the bytes-like object s using Base64 and return a bytes object.
Optional altchars should be a byte string of length 2 which specifies an
alternative alphabet for the '+' and '/' characters. This allows an
application to e.g. generate url or filesystem safe Base64 strings.
If padded is True (the default), padding will be applied to the
result bytes. If padding if False, no padding is applied.
"""
encoded = binascii.b2a_base64(s, newline=False)
if altchars is not None:
assert len(altchars) == 2, repr(altchars)
encoded = encoded.translate(bytes.maketrans(b'+/', altchars))
if not padded:
encoded = encoded.rstrip(b'=')
return encoded
|
3,513 | def map_host_to_project(request):
"""
Take the incoming host, and map it to the proper Project.
We check, in order:
* The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping
* The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name
* The hostname without port information, which maps to ``Domain`` objects
"""
host = request.get_host().lower().split(':')[0]
public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]
host_parts = host.split('.')
public_domain_parts = public_domain.split('.')
# Explicit Project slug being passed in
if 'HTTP_X_RTD_SLUG' in request.META:
project = request.META['HTTP_X_RTD_SLUG'].lower()
request.rtdheader = True
elif public_domain in host:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
if public_domain_parts == host_parts[1:]:
project = host_parts[0]
request.subdomain = True
log.debug('Proxito Public Domain: %s', host)
else:
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
# But these feel like they might be phishing, etc. so let's block them for now.
project = None
log.warning('Weird variation on our hostname: %s', host)
raise Http404(f'404: Invalid domain matching {public_domain}')
# Serve CNAMEs
else:
domain_qs = Domain.objects.filter(domain=host).prefetch_related('project')
if domain_qs.exists():
project = domain_qs.first().project.slug
request.cname = True
log.debug('Proxito CNAME: %s', host)
else:
# Some person is CNAMEing to us without configuring a domain - 404.
project = None
log.debug('CNAME 404: %s', host)
raise Http404('CNAME 404')
log.debug('Proxito Project: %s', project)
return project
| def map_host_to_project(request):
"""
Take the request and map the host to the proper project slug.
We check, in order:
* The ``HTTP_X_RTD_SLUG`` host header for explicit Project mapping
* The ``PUBLIC_DOMAIN`` where we can use the subdomain as the project name
* The hostname without port information, which maps to ``Domain`` objects
"""
host = request.get_host().lower().split(':')[0]
public_domain = settings.PUBLIC_DOMAIN.lower().split(':')[0]
host_parts = host.split('.')
public_domain_parts = public_domain.split('.')
# Explicit Project slug being passed in
if 'HTTP_X_RTD_SLUG' in request.META:
project = request.META['HTTP_X_RTD_SLUG'].lower()
request.rtdheader = True
elif public_domain in host:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`
if public_domain_parts == host_parts[1:]:
project = host_parts[0]
request.subdomain = True
log.debug('Proxito Public Domain: %s', host)
else:
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example
# But these feel like they might be phishing, etc. so let's block them for now.
project = None
log.warning('Weird variation on our hostname: %s', host)
raise Http404(f'404: Invalid domain matching {public_domain}')
# Serve CNAMEs
else:
domain_qs = Domain.objects.filter(domain=host).prefetch_related('project')
if domain_qs.exists():
project = domain_qs.first().project.slug
request.cname = True
log.debug('Proxito CNAME: %s', host)
else:
# Some person is CNAMEing to us without configuring a domain - 404.
project = None
log.debug('CNAME 404: %s', host)
raise Http404('CNAME 404')
log.debug('Proxito Project: %s', project)
return project
|
35,367 | def _get_available_base_ansys():
"""Return a dictionary of available ANSYS versions with their base paths.
Returns
-------
Return all installed ANSYS paths in Windows
>>> _get_available_base_ansys()
{194: 'C:\\Program Files\\ANSYS INC\\v194',
202: 'C:\\Program Files\\ANSYS INC\\v202',
211: 'C:\\Program Files\\ANSYS INC\\v211'}
Within Linux
>>> _get_available_base_ansys()
{194: '/usr/ansys_inc/v194',
202: '/usr/ansys_inc/v202',
211: '/usr/ansys_inc/v211'}
"""
base_path = None
if os.name == "nt":
supported_versions = SUPPORTED_ANSYS_VERSIONS
awp_roots = {
ver: os.environ.get(f"AWP_ROOT{ver}", "") for ver in supported_versions
}
installed_versions = {
ver: path for ver, path in awp_roots.items() if path and os.path.isdir(path)
}
if installed_versions:
return installed_versions
else: # pragma: no cover
LOG.debug(
"No installed ANSYS found using 'AWP_ROOT' environments. Let's suppose a base path."
)
base_path = os.path.join(os.environ["PROGRAMFILES"], "ANSYS INC")
if not os.path.exists(base_path):
LOG.debug(
f"The supposed 'base_path'{base_path} does not exist. No available ansys found."
)
return {}
elif os.name == "posix":
for path in ["/usr/ansys_inc", "/ansys_inc"]:
if os.path.isdir(path):
base_path = path
else: # pragma: no cover
raise OSError(f"Unsupported OS {os.name}")
if base_path is None:
return {}
paths = glob(os.path.join(base_path, "v*"))
# Testing for ANSYS STUDENT version
if not paths: # pragma: no cover
paths = glob(os.path.join(base_path, "ANSYS*"))
if not paths:
return {}
ansys_paths = {}
for path in paths:
ver_str = path[-3:]
if is_float(ver_str):
ansys_paths[int(ver_str)] = path
return ansys_paths
| def _get_available_base_ansys():
"""Return a dictionary of available ANSYS versions with their base paths.
Returns
-------
Return all installed ANSYS paths in Windows
>>> _get_available_base_ansys()
{194: 'C:\\Program Files\\ANSYS INC\\v194',
202: 'C:\\Program Files\\ANSYS INC\\v202',
211: 'C:\\Program Files\\ANSYS INC\\v211'}
Within Linux
>>> _get_available_base_ansys()
{194: '/usr/ansys_inc/v194',
202: '/usr/ansys_inc/v202',
211: '/usr/ansys_inc/v211'}
"""
base_path = None
if os.name == "nt": # pragma: no cover
supported_versions = SUPPORTED_ANSYS_VERSIONS
awp_roots = {
ver: os.environ.get(f"AWP_ROOT{ver}", "") for ver in supported_versions
}
installed_versions = {
ver: path for ver, path in awp_roots.items() if path and os.path.isdir(path)
}
if installed_versions:
return installed_versions
else: # pragma: no cover
LOG.debug(
"No installed ANSYS found using 'AWP_ROOT' environments. Let's suppose a base path."
)
base_path = os.path.join(os.environ["PROGRAMFILES"], "ANSYS INC")
if not os.path.exists(base_path):
LOG.debug(
f"The supposed 'base_path'{base_path} does not exist. No available ansys found."
)
return {}
elif os.name == "posix":
for path in ["/usr/ansys_inc", "/ansys_inc"]:
if os.path.isdir(path):
base_path = path
else: # pragma: no cover
raise OSError(f"Unsupported OS {os.name}")
if base_path is None:
return {}
paths = glob(os.path.join(base_path, "v*"))
# Testing for ANSYS STUDENT version
if not paths: # pragma: no cover
paths = glob(os.path.join(base_path, "ANSYS*"))
if not paths:
return {}
ansys_paths = {}
for path in paths:
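        # Install directories look like ".../v211"; the trailing three characters encode the version.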
ver_str = path[-3:]
if is_float(ver_str):
ansys_paths[int(ver_str)] = path
return ansys_paths
|
1,827 | def paired_haversine_distances(X, Y):
"""
Computes the paired Haversine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, 2)
Y : array-like, shape (n_samples, 2)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
    if len(X.shape) != 2 or len(Y.shape) != 2:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X and Y should both be 2-dimensional")
if not (X.shape[1] == 2 and Y.shape[1] == 2):
raise ValueError("Haversine distance only valid in 2 dimensions")
X, Y = map(np.radians, [X, Y])
delta = Y-X
lat1, lat2 = X[:, 1], Y[:, 1]
dlon, dlat = delta[:, 0], delta[:, 1]
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
km = 6367 * c
return km
| def paired_haversine_distances(X, Y):
"""
Computes the paired Haversine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, 2)
Y : array-like, shape (n_samples, 2)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
    if len(X.shape) != 2 or len(Y.shape) != 2:
        raise ValueError("Incompatible dimension for X and Y matrices: "
                         "X and Y should both be 2-dimensional")
if not (X.shape[1] == 2 and Y.shape[1] == 2):
raise ValueError("Haversine distance only valid in 2 dimensions")
X, Y = np.radians(X), np.radians(Y)
delta = Y-X
lat1, lat2 = X[:, 1], Y[:, 1]
dlon, dlat = delta[:, 0], delta[:, 1]
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arcsin(np.sqrt(a))
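    # 6367 km approximates the Earth's mean radius, so distances are returned in kilometres.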
km = 6367 * c
return km
|
2,166 | def check_outliers_fit_predict(name, estimator_orig, strict_mode=True):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, 'predict'):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers)/n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if (num_outliers != expected_outliers and
hasattr(estimator, 'decision_function')):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
    # raises error when contamination is a scalar and not in (0, 0.5]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit_predict(X)
| def check_outliers_fit_predict(name, estimator_orig, strict_mode=True):
# Check fit_predict for outlier detectors.
n_samples = 300
X, _ = make_blobs(n_samples=n_samples, random_state=0)
X = shuffle(X, random_state=7)
n_samples, n_features = X.shape
estimator = clone(estimator_orig)
set_random_state(estimator)
y_pred = estimator.fit_predict(X)
assert y_pred.shape == (n_samples,)
assert y_pred.dtype.kind == 'i'
assert_array_equal(np.unique(y_pred), np.array([-1, 1]))
# check fit_predict = fit.predict when the estimator has both a predict and
# a fit_predict method. recall that it is already assumed here that the
# estimator has a fit_predict method
if hasattr(estimator, 'predict'):
y_pred_2 = estimator.fit(X).predict(X)
assert_array_equal(y_pred, y_pred_2)
if hasattr(estimator, "contamination"):
# proportion of outliers equal to contamination parameter when not
# set to 'auto'
expected_outliers = 30
contamination = float(expected_outliers)/n_samples
estimator.set_params(contamination=contamination)
y_pred = estimator.fit_predict(X)
num_outliers = np.sum(y_pred != 1)
# num_outliers should be equal to expected_outliers unless
# there are ties in the decision_function values. this can
# only be tested for estimators with a decision_function
# method
if (num_outliers != expected_outliers and
hasattr(estimator, 'decision_function')):
decision = estimator.decision_function(X)
check_outlier_corruption(num_outliers, expected_outliers, decision)
    # raises error when contamination is a scalar and not in (0, 0.5]
msg = r"contamination must be in \(0, 0.5]"
for contamination in [-0.5, -0.001, 0.5001, 2.3]:
estimator.set_params(contamination=contamination)
with raises(ValueError, match=msg):
estimator.fit_predict(X)
|
11,542 | def upgrade():
op.add_column("user", sa.Column("title", sa.UnicodeText()))
| def upgrade():
op.add_column("user", sa.Column("comms_opt_in", sa.UnicodeText()))
|
7,268 | def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is gray scale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
| def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
    method : {'default', 'ror', 'uniform', 'nri_uniform', 'var'}
Method to determine the pattern:
``default``
Original local binary pattern which is grayscale but not
rotation invariant.
``ror``
Extension of default implementation which is gray scale and
rotation invariant.
``uniform``
Improved rotation invariance with uniform patterns and finer
quantization of the angular space which is gray scale and
rotation invariant.
``nri_uniform``
Non rotation-invariant uniform patterns variant which is
only gray scale invariant [2]_.
``var``
Rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
check_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
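A short usage sketch for the LBP routine above; the import path (skimage.feature.local_binary_pattern) and the toy image are assumptions made only for illustration.

import numpy as np
from skimage.feature import local_binary_pattern

rng = np.random.default_rng(0)
image = rng.integers(0, 256, size=(32, 32)).astype(np.uint8)   # toy graylevel image

P, R = 8, 1.0   # 8 neighbours on a circle of radius 1
codes = local_binary_pattern(image, P, R, method='uniform')

# With method='uniform' the codes lie in [0, P + 1], so a (P + 2)-bin histogram
# gives a compact rotation-invariant texture descriptor.
hist, _ = np.histogram(codes, bins=np.arange(P + 3), density=True)
print(hist)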
9,123 | def getTypedValue(data_type, value):
'''Utility function to cast a string value to the appropriate XSD type. '''
# If the default value is empty
if value is None:
return value
if data_type == 'boolean':
return True if value.lower() == 'true' else False
elif data_type == 'integer':
return int(value)
elif data_type == 'float':
return float(value)
elif data_type == 'string':
return str(value)
else:
return value # no type casting
| def getTypedValue(data_type, value):
'''Utility function to cast a string value to the appropriate XSD type. '''
# If the default value is empty
if value is None:
return
if data_type == 'boolean':
return True if value.lower() == 'true' else False
elif data_type == 'integer':
return int(value)
elif data_type == 'float':
return float(value)
elif data_type == 'string':
return str(value)
else:
return value # no type casting
|
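A few illustrative calls against getTypedValue as defined above; these only exercise the branches shown and assume no behaviour beyond them.

assert getTypedValue('boolean', 'True') is True
assert getTypedValue('integer', '42') == 42
assert getTypedValue('float', '2.5') == 2.5
assert getTypedValue('string', 7) == '7'
assert getTypedValue('date', '2021-01-01') == '2021-01-01'   # unknown type: returned unchanged
assert getTypedValue('integer', None) is None                # empty default passes straight through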
11,874 | def read_32(fobj, start_length, size):
"""
Read a 32bit RGB icon resource. Seems to be either uncompressed or
an RLE packbits-like scheme.
"""
(start, length) = start_length
fobj.seek(start)
pixel_size = (size[0] * size[2], size[1] * size[2])
sizesq = pixel_size[0] * pixel_size[1]
if length == sizesq * 3:
# uncompressed ("RGBRGBGB")
indata = fobj.read(length)
im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
else:
# decode image
im = Image.new("RGB", pixel_size, None)
for band_ix in range(3):
data = []
bytesleft = sizesq
while bytesleft > 0:
byte = fobj.read(1)
if not byte:
break
byte = i8(byte)
if byte & 0x80:
blocksize = byte - 125
byte = fobj.read(1)
for i in range(blocksize):
data.append(byte)
else:
blocksize = byte + 1
data.append(fobj.read(blocksize))
bytesleft -= blocksize
if bytesleft <= 0:
break
if bytesleft != 0:
raise SyntaxError(f"Error reading channel [{repr(bytesleft)} left]")
band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
im.im.putband(band.im, band_ix)
return {"RGB": im}
| def read_32(fobj, start_length, size):
"""
Read a 32bit RGB icon resource. Seems to be either uncompressed or
an RLE packbits-like scheme.
"""
(start, length) = start_length
fobj.seek(start)
pixel_size = (size[0] * size[2], size[1] * size[2])
sizesq = pixel_size[0] * pixel_size[1]
if length == sizesq * 3:
# uncompressed ("RGBRGBGB")
indata = fobj.read(length)
im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
else:
# decode image
im = Image.new("RGB", pixel_size, None)
for band_ix in range(3):
data = []
bytesleft = sizesq
while bytesleft > 0:
byte = fobj.read(1)
if not byte:
break
byte = i8(byte)
if byte & 0x80:
blocksize = byte - 125
byte = fobj.read(1)
for i in range(blocksize):
data.append(byte)
else:
blocksize = byte + 1
data.append(fobj.read(blocksize))
bytesleft -= blocksize
if bytesleft <= 0:
break
if bytesleft != 0:
raise SyntaxError(f"Error reading channel [{bytesleft!r} left]")
band = Image.frombuffer("L", pixel_size, b"".join(data), "raw", "L", 0, 1)
im.im.putband(band.im, band_ix)
return {"RGB": im}
|
24,346 | def get_metadata_from_http(raw_version):
if raw_version == "":
get_check_logger().debug("unable to find HAProxy version info")
return None
else:
return VERSION_PATTERN.search(raw_version).group(1)
| def get_version_from_http(raw_version):
if raw_version == "":
get_check_logger().debug("unable to find HAProxy version info")
return None
else:
return VERSION_PATTERN.search(raw_version).group(1)
|
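VERSION_PATTERN is not shown in this record, so the regex below is a plausible stand-in used purely to illustrate what .search(...).group(1) extracts from a raw version string.

import re

VERSION_PATTERN = re.compile(r"version ([\d.]+[\w.-]*)")   # assumed pattern, not from the source

raw = "HAProxy version 2.4.18-1 2022/07/27 - https://haproxy.org/"
print(VERSION_PATTERN.search(raw).group(1))   # -> '2.4.18-1'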
10,751 | def literally(obj):
"""Forces numba to take *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bounded to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter its behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
| def literally(obj):
"""Forces Numba to interpret *obj* as an Literal value.
*obj* must be either a literal or an argument of the caller function, where
the argument must be bounded to a literal. The literal requirement
propagates up the call stack.
This function is intercepted by the compiler to alter its behavior to
wrap corresponding function parameters as ``Literal``. It has **no effect**
outside of nopython-mode (interpreter, and objectmode).
The current implementation detects literal arguments in two ways:
1. Scan uses of ``literally`` via a rewrite pass.
2. ``literally`` is overloaded to raise ``numba.errors.ForceLiteralArg``
to signal the dispatcher to treat the corresponding parameter
differently. This mode is to support indirect use (via a function call).
The execution semantic of this function is equivalent to an identity
function.
See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
"""
return obj
|
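A minimal sketch of how literally is typically used inside a jitted function, assuming Numba is installed; outside nopython mode it is just an identity function, as the body above shows.

from numba import njit, literally

@njit
def scaled(x, factor):
    # Force `factor` to be treated as a compile-time Literal; each distinct
    # value of `factor` triggers its own specialization.
    factor = literally(factor)
    return x * factor

print(scaled(3, 2))   # 6, compiled with factor bound to Literal[int](2)
print(literally(5))   # plain interpreter call: identity, returns 5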
28,049 | def get_report_hashes(
baseline_file_paths: List[str]
) -> Set[str]:
""" Get uniqued hashes from baseline files. """
report_hashes = set()
for file_path in baseline_file_paths:
with open(file_path, mode='r', encoding='utf-8', errors="ignore") as f:
report_hashes = report_hashes.union(__get_report_hashes(f))
return report_hashes
| def get_report_hashes(
baseline_file_paths: Iterable[str]
) -> Set[str]:
""" Get uniqued hashes from baseline files. """
report_hashes = set()
for file_path in baseline_file_paths:
with open(file_path, mode='r', encoding='utf-8', errors="ignore") as f:
report_hashes = report_hashes.union(__get_report_hashes(f))
return report_hashes
|
32,171 | def cyble_fetch_events(client, method, args):
"""
Fetch alert details from server for creating incidents in XSOAR
    :param client: instance of client to communicate with server
:param method: Requests method to be used
:param args: parameters for fetching event details
:return: events from the server
"""
params = {
'token': args['token'],
'from': int(args['from']),
'limit': int(args['limit']),
'start_date': args['start_date'],
'end_date': args['end_date'],
'order_by': args['order_by']
}
events_url = r'/api/v2/events/all'
result = client.get_alerts(method, events_url, params)
incidents: List[Dict[str, Any]] = []
if result is not None:
eventTypes = get_event_types(client, "GET", args['token'])
incidents = format_incidents(result, eventTypes)
return incidents
| def cyble_fetch_events(client, method, args):
"""
Fetch alert details from server for creating incidents in XSOAR
    :param client: instance of client to communicate with server
:param method: Requests method to be used
:param args: parameters for fetching event details
:return: events from the server
"""
params = {
'token': args['token'],
'from': int(args['from']),
'limit': int(args['limit']),
'start_date': args['start_date'],
'end_date': args['end_date'],
'order_by': args['order_by']
}
events_url = r'/api/v2/events/all'
result = client.get_alerts(method, events_url, params)
incidents: List[Dict[str, Any]] = []
if result:
eventTypes = get_event_types(client, "GET", args['token'])
incidents = format_incidents(result, eventTypes)
return incidents
|
28,548 | def compare(
dataset_dict,
ic="waic",
method="BB-pseudo-BMA",
b_samples=1000,
alpha=1,
seed=None,
scale="deviance",
):
r"""Compare models based on WAIC or LOO cross-validation.
WAIC is the Widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap.
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended).
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
        on the simplex. A smaller alpha will keep the final weights further away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the key with which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO) and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable.
This could be indication of WAIC/LOO starting to fail see
http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC.
Examples
--------
Compare the centered and non centered models of the eight school problem:
.. ipython::
In [1]: import arviz as az
...: data1 = az.load_arviz_data("non_centered_eight")
...: data2 = az.load_arviz_data("centered_eight")
...: compare_dict = {"non centered": data1, "centered": data2}
...: az.compare(compare_dict)
Compare the models using LOO-CV, returning the IC in log scale and calculating the
weights using the stacking method.
.. ipython::
In [1]: az.compare(compare_dict, ic="loo", method="stacking", scale="log")
"""
names = list(dataset_dict.keys())
scale = scale.lower()
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
ic = ic.lower()
if ic == "waic":
ic_func = waic
df_comp = pd.DataFrame(
index=names,
columns=["waic", "p_waic", "d_waic", "weight", "se", "dse", "warning", "waic_scale"],
)
scale_col = "waic_scale"
elif ic == "loo":
ic_func = loo
df_comp = pd.DataFrame(
index=names,
columns=["loo", "p_loo", "d_loo", "weight", "se", "dse", "warning", "loo_scale"],
)
scale_col = "loo_scale"
else:
raise NotImplementedError("The information criterion {} is not supported.".format(ic))
if method.lower() not in ["stacking", "bb-pseudo-bma", "pseudo-bma"]:
raise ValueError("The method {}, to compute weights, is not supported.".format(method))
ic_se = "{}_se".format(ic)
p_ic = "p_{}".format(ic)
ic_i = "{}_i".format(ic)
ics = pd.DataFrame()
names = []
for name, dataset in dataset_dict.items():
names.append(name)
ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])
ics.index = names
ics.sort_values(by=ic, inplace=True, ascending=ascending)
if method.lower() == "stacking":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
exp_ic_i = np.exp(ic_i_val / scale_value)
last_col = cols - 1
def w_fuller(weights):
return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))
def log_score(weights):
w_full = w_fuller(weights)
score = 0.0
for i in range(rows):
score += np.log(np.dot(exp_ic_i[i], w_full))
return -score
def gradient(weights):
w_full = w_fuller(weights)
grad = np.zeros(last_col)
for k in range(last_col - 1):
for i in range(rows):
grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(
exp_ic_i[i], w_full
)
return -grad
theta = np.full(last_col, 1.0 / cols)
bounds = [(0.0, 1.0) for _ in range(last_col)]
constraints = [
{"type": "ineq", "fun": lambda x: 1.0 - np.sum(x)},
{"type": "ineq", "fun": np.sum},
]
weights = minimize(
fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints
)
weights = w_fuller(weights["x"])
ses = ics[ic_se]
elif method.lower() == "bb-pseudo-bma":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
ic_i_val = ic_i_val * rows
b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)
weights = np.zeros((b_samples, cols))
z_bs = np.zeros_like(weights)
for i in range(b_samples):
z_b = np.dot(b_weighting[i], ic_i_val)
u_weights = np.exp((z_b - np.min(z_b)) / scale_value)
z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation
weights[i] = u_weights / np.sum(u_weights)
weights = weights.mean(axis=0)
ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member
elif method.lower() == "pseudo-bma":
min_ic = ics.iloc[0][ic]
z_rv = np.exp((ics[ic] - min_ic) / scale_value)
weights = z_rv / np.sum(z_rv)
ses = ics[ic_se]
if np.any(weights):
min_ic_i_val = ics[ic_i].iloc[0]
for idx, val in enumerate(ics.index):
res = ics.loc[val]
if scale_value < 0:
diff = res[ic_i] - min_ic_i_val
else:
diff = min_ic_i_val - res[ic_i]
d_ic = np.sum(diff)
d_std_err = np.sqrt(len(diff) * np.var(diff))
std_err = ses.loc[val]
weight = weights[idx]
df_comp.at[val] = (
res[ic],
res[p_ic],
d_ic,
weight,
std_err,
d_std_err,
res["warning"],
res[scale_col],
)
return df_comp.sort_values(by=ic, ascending=ascending)
| def compare(
dataset_dict,
ic="waic",
method="BB-pseudo-BMA",
b_samples=1000,
alpha=1,
seed=None,
scale="deviance",
):
r"""Compare models based on WAIC or LOO cross-validation.
WAIC is the widely applicable information criterion, and LOO is leave-one-out
(LOO) cross-validation. Read more theory here - in a paper by some of the
leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
dataset_dict : dict[str] -> InferenceData
A dictionary of model names and InferenceData objects
ic : str
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
method : str
Method used to estimate the weights for each model. Available options are:
- 'stacking' : stacking of predictive distributions.
- 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type
weighting. The weights are stabilized using the Bayesian bootstrap.
- 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type
weighting, without Bootstrap stabilization (not recommended).
For more information read https://arxiv.org/abs/1704.02030
b_samples: int
Number of samples taken by the Bayesian bootstrap estimation.
Only useful when method = 'BB-pseudo-BMA'.
alpha : float
The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform
        on the simplex. A smaller alpha will keep the final weights further away from 0 and 1.
seed : int or np.random.RandomState instance
If int or RandomState, use it for seeding Bayesian bootstrap. Only
useful when method = 'BB-pseudo-BMA'. Default None the global
np.random state is used.
scale : str
Output scale for IC. Available options are:
- `deviance` : (default) -2 * (log-score)
- `log` : 1 * log-score (after Vehtari et al. (2017))
- `negative_log` : -1 * (log-score)
Returns
-------
A DataFrame, ordered from lowest to highest IC. The index reflects the key with which the
models are passed to this function. The columns are:
IC : Information Criteria (WAIC or LOO).
Smaller IC indicates higher out-of-sample predictive fit ("better" model). Default WAIC.
If `scale == log` higher IC indicates higher out-of-sample predictive fit ("better" model).
pIC : Estimated effective number of parameters.
dIC : Relative difference between each IC (WAIC or LOO) and the lowest IC (WAIC or LOO).
It's always 0 for the top-ranked model.
weight: Relative weight for each model.
This can be loosely interpreted as the probability of each model (among the compared model)
given the data. By default the uncertainty in the weights estimation is considered using
Bayesian bootstrap.
SE : Standard error of the IC estimate.
If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.
dSE : Standard error of the difference in IC between each model and the top-ranked model.
It's always 0 for the top-ranked model.
warning : A value of 1 indicates that the computation of the IC may not be reliable.
This could be indication of WAIC/LOO starting to fail see
http://arxiv.org/abs/1507.04544 for details.
scale : Scale used for the IC.
Examples
--------
Compare the centered and non centered models of the eight school problem:
.. ipython::
In [1]: import arviz as az
...: data1 = az.load_arviz_data("non_centered_eight")
...: data2 = az.load_arviz_data("centered_eight")
...: compare_dict = {"non centered": data1, "centered": data2}
...: az.compare(compare_dict)
Compare the models using LOO-CV, returning the IC in log scale and calculating the
weights using the stacking method.
.. ipython::
In [1]: az.compare(compare_dict, ic="loo", method="stacking", scale="log")
"""
names = list(dataset_dict.keys())
scale = scale.lower()
if scale == "log":
scale_value = 1
ascending = False
else:
if scale == "negative_log":
scale_value = -1
else:
scale_value = -2
ascending = True
ic = ic.lower()
if ic == "waic":
ic_func = waic
df_comp = pd.DataFrame(
index=names,
columns=["waic", "p_waic", "d_waic", "weight", "se", "dse", "warning", "waic_scale"],
)
scale_col = "waic_scale"
elif ic == "loo":
ic_func = loo
df_comp = pd.DataFrame(
index=names,
columns=["loo", "p_loo", "d_loo", "weight", "se", "dse", "warning", "loo_scale"],
)
scale_col = "loo_scale"
else:
raise NotImplementedError("The information criterion {} is not supported.".format(ic))
if method.lower() not in ["stacking", "bb-pseudo-bma", "pseudo-bma"]:
raise ValueError("The method {}, to compute weights, is not supported.".format(method))
ic_se = "{}_se".format(ic)
p_ic = "p_{}".format(ic)
ic_i = "{}_i".format(ic)
ics = pd.DataFrame()
names = []
for name, dataset in dataset_dict.items():
names.append(name)
ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])
ics.index = names
ics.sort_values(by=ic, inplace=True, ascending=ascending)
if method.lower() == "stacking":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
exp_ic_i = np.exp(ic_i_val / scale_value)
last_col = cols - 1
def w_fuller(weights):
return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))
def log_score(weights):
w_full = w_fuller(weights)
score = 0.0
for i in range(rows):
score += np.log(np.dot(exp_ic_i[i], w_full))
return -score
def gradient(weights):
w_full = w_fuller(weights)
grad = np.zeros(last_col)
for k in range(last_col - 1):
for i in range(rows):
grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(
exp_ic_i[i], w_full
)
return -grad
theta = np.full(last_col, 1.0 / cols)
bounds = [(0.0, 1.0) for _ in range(last_col)]
constraints = [
{"type": "ineq", "fun": lambda x: 1.0 - np.sum(x)},
{"type": "ineq", "fun": np.sum},
]
weights = minimize(
fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints
)
weights = w_fuller(weights["x"])
ses = ics[ic_se]
elif method.lower() == "bb-pseudo-bma":
rows, cols, ic_i_val = _ic_matrix(ics, ic_i)
ic_i_val = ic_i_val * rows
b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)
weights = np.zeros((b_samples, cols))
z_bs = np.zeros_like(weights)
for i in range(b_samples):
z_b = np.dot(b_weighting[i], ic_i_val)
u_weights = np.exp((z_b - np.min(z_b)) / scale_value)
z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation
weights[i] = u_weights / np.sum(u_weights)
weights = weights.mean(axis=0)
ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member
elif method.lower() == "pseudo-bma":
min_ic = ics.iloc[0][ic]
z_rv = np.exp((ics[ic] - min_ic) / scale_value)
weights = z_rv / np.sum(z_rv)
ses = ics[ic_se]
if np.any(weights):
min_ic_i_val = ics[ic_i].iloc[0]
for idx, val in enumerate(ics.index):
res = ics.loc[val]
if scale_value < 0:
diff = res[ic_i] - min_ic_i_val
else:
diff = min_ic_i_val - res[ic_i]
d_ic = np.sum(diff)
d_std_err = np.sqrt(len(diff) * np.var(diff))
std_err = ses.loc[val]
weight = weights[idx]
df_comp.at[val] = (
res[ic],
res[p_ic],
d_ic,
weight,
std_err,
d_std_err,
res["warning"],
res[scale_col],
)
return df_comp.sort_values(by=ic, ascending=ascending)
|
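A tiny numeric sketch of the Akaike-type weighting used in the 'pseudo-BMA' branch above, on the default deviance scale (scale_value == -2); the IC values are made up.

import numpy as np

ics = np.array([1012.3, 1015.9, 1026.1])   # hypothetical WAIC values on the deviance scale
z_rv = np.exp((ics - ics.min()) / -2.0)    # relative likelihood of each model
weights = z_rv / z_rv.sum()
print(weights.round(3))                    # ~[0.857 0.142 0.001]: the best model dominates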
15,263 | def setup(hass, config):
"""Set up the Splunk component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
token = conf.get(CONF_TOKEN)
use_ssl = conf[CONF_SSL]
verify_ssl = conf.get(CONF_VERIFY_SSL)
name = conf.get(CONF_NAME)
entity_filter = conf[CONF_FILTER]
hec = http_event_collector(token, host, "json", name, port, use_ssl)
hec.SSL_verify = verify_ssl
if not hec.check_connectivity():
_LOGGER.exception("Cannot connect to Splunk")
def splunk_event_listener(event):
"""Listen for new messages on the bus and sends them to Splunk."""
state = event.data.get("new_state")
if state is None or not entity_filter(state.entity_id):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
payload = {
"time": event.time_fired.timestamp(),
"host": name,
"event": {
"domain": state.domain,
"entity_id": state.object_id,
"attributes": dict(state.attributes),
"value": _state,
},
}
hec.batchEvent(payload)
def splunk_event_flush(event):
hec.flushBatch()
hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener)
hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush)
return True
| def setup(hass, config):
"""Set up the Splunk component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
token = conf.get(CONF_TOKEN)
use_ssl = conf[CONF_SSL]
verify_ssl = conf.get(CONF_VERIFY_SSL)
name = conf.get(CONF_NAME)
entity_filter = conf[CONF_FILTER]
hec = http_event_collector(token, host, FORMAT_JSON, name, port, use_ssl)
hec.SSL_verify = verify_ssl
if not hec.check_connectivity():
_LOGGER.exception("Cannot connect to Splunk")
def splunk_event_listener(event):
"""Listen for new messages on the bus and sends them to Splunk."""
state = event.data.get("new_state")
if state is None or not entity_filter(state.entity_id):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
payload = {
"time": event.time_fired.timestamp(),
"host": name,
"event": {
"domain": state.domain,
"entity_id": state.object_id,
"attributes": dict(state.attributes),
"value": _state,
},
}
hec.batchEvent(payload)
def splunk_event_flush(event):
hec.flushBatch()
hass.bus.listen(EVENT_STATE_CHANGED, splunk_event_listener)
hass.bus.listen(EVENT_TIME_CHANGED, splunk_event_flush)
hass.bus.listen(EVENT_HOMEASSISTANT_STOP, splunk_event_flush)
return True
|
5,059 | def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2 / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
| def test_subfigure_spanning():
# test that subfigures get laid out properly...
fig = plt.figure(constrained_layout=True)
gs = fig.add_gridspec(3, 3)
sub_figs = []
sub_figs += [fig.add_subfigure(gs[0, 0])]
sub_figs += [fig.add_subfigure(gs[0:2, 1])]
sub_figs += [fig.add_subfigure(gs[2, 1:3])]
w = 640
h = 480
minp = sub_figs[0].bbox.min
exp = np.array([0., h*2/3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[0].bbox.max
exp = np.array([w / 3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[1].bbox.min
exp = np.array([w / 3, h * 1 / 3])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[1].bbox.max
exp = np.array([w * 2/3, h])
np.testing.assert_allclose(maxp, exp)
minp = sub_figs[2].bbox.min
exp = np.array([w / 3, 0])
np.testing.assert_allclose(minp, exp)
maxp = sub_figs[2].bbox.max
exp = np.array([w, h * 1 / 3])
np.testing.assert_allclose(maxp, exp)
|
31,959 | def format_eadable_security_policy_match_headers(hedear_name):
formated_headers = {
'From': 'From zone',
'To': 'To zone',
}
return formated_headers.get(hedear_name, hedear_name)
| def format_readable_security_policy_match_headers(hedear_name):
formated_headers = {
'From': 'From zone',
'To': 'To zone',
}
return formated_headers.get(hedear_name, hedear_name)
|
54,611 | def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
"""
Compute the first Wasserstein distance between two 1D distributions.
:param u_values: Values of first distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type u_values: `np.ndarray`
:param v_values: Values of second distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type v_values: `np.ndarray`
:param u_weights: Weight for each value. If None equal weights will be used.
:type u_weights: `np.ndarray`
:param v_weights: Weight for each value. If None equal weights will be used.
:type v_weights: `np.ndarray`
:return: The Wasserstein distance between the two distributions
:rtype: `np.ndarray`
"""
from scipy.stats import wasserstein_distance
assert u_values.shape == v_values.shape
if u_weights is not None:
assert v_weights is not None
if u_weights is None:
assert v_weights is None
if u_weights is not None and v_weights is not None:
assert u_weights.shape == v_weights.shape
if u_weights is not None:
assert u_values.shape[0] == u_weights.shape[0]
u_values = u_values.flatten().reshape(u_values.shape[0], -1)
v_values = v_values.flatten().reshape(v_values.shape[0], -1)
wd = np.zeros(u_values.shape[0])
for i in range(u_values.shape[0]):
if u_weights is None and v_weights is None:
wd[i] = wasserstein_distance(u_values[i], v_values[i])
elif u_weights is not None and v_weights is not None:
wd[i] = wasserstein_distance(u_values[i], v_values[i], u_weights[i], v_weights[i])
return wd
| def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
"""
Compute the first Wasserstein distance between two 1D distributions.
:param u_values: Values of first distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type u_values: `np.ndarray`
:param v_values: Values of second distribution with shape (nb_samples, feature_dim_1, ..., feature_dim_n)
:type v_values: `np.ndarray`
:param u_weights: Weight for each value. If None equal weights will be used.
:type u_weights: `np.ndarray`
:param v_weights: Weight for each value. If None equal weights will be used.
:type v_weights: `np.ndarray`
:return: The Wasserstein distance between the two distributions
:rtype: `np.ndarray`
"""
from scipy.stats import wasserstein_distance
assert u_values.shape == v_values.shape
if u_weights is not None:
assert v_weights is not None
assert u_weights.shape == v_weights.shape
assert u_values.shape[0] == u_weights.shape[0]
else:
assert v_weights is None
u_values = u_values.flatten().reshape(u_values.shape[0], -1)
v_values = v_values.flatten().reshape(v_values.shape[0], -1)
wd = np.zeros(u_values.shape[0])
for i in range(u_values.shape[0]):
if u_weights is None and v_weights is None:
wd[i] = wasserstein_distance(u_values[i], v_values[i])
elif u_weights is not None and v_weights is not None:
wd[i] = wasserstein_distance(u_values[i], v_values[i], u_weights[i], v_weights[i])
return wd
|
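The loop above computes one 1-D distance per sample row; a toy check of that equivalence against scipy.stats.wasserstein_distance directly (values are illustrative only).

import numpy as np
from scipy.stats import wasserstein_distance as scipy_wd

u = np.array([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]])   # 2 samples, 3 "features" each
v = np.array([[1.0, 2.0, 3.0], [0.0, 0.0, 1.0]])

# One distance per row, exactly what the unweighted branch of the loop computes.
per_sample = np.array([scipy_wd(u[i], v[i]) for i in range(u.shape[0])])
print(per_sample)   # [1.0, 0.333...]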
55,864 | def select(
key: str,
default: Any = _DEFAULT_SELECT_MARKER_,
*,
_parent_: Container,
) -> Any:
from omegaconf._impl import select_value
if default is _DEFAULT_SELECT_MARKER_:
return select_value(cfg=_parent_, key=key, absolute_key=True)
else:
return select_value(cfg=_parent_, key=key, absolute_key=True, default=default)
| def select(
key: str,
default: Any = _DEFAULT_MARKER_,
*,
_parent_: Container,
) -> Any:
from omegaconf._impl import select_value
return select_value(cfg=_parent_, key=key, absolute_key=True, default=default)
|
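Assuming this function backs OmegaConf's built-in oc.select resolver (the omegaconf._impl import suggests so, but that is an inference), a usage sketch could look like this.

from omegaconf import OmegaConf

cfg = OmegaConf.create({
    "db": {"host": "localhost"},
    "host": "${oc.select:db.host}",       # key exists -> "localhost"
    "port": "${oc.select:db.port,5432}",  # key missing -> default 5432
})
print(cfg.host, cfg.port)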
40,482 | def to_homogeneous_label(data: HeteroData, ) -> Tensor:
labeled_node_types = []
y = []
for i, node_type in enumerate(data.node_types):
if 'y' in data[node_type].keys():
labeled_node_types.append(i)
y.append(data[node_type].y)
else:
y.append(-1 * torch.ones(data[node_type].num_nodes))
if labeled_node_types:
return torch.cat(y)
| def to_homogeneous_label(data: HeteroData) -> Tensor:
labeled_node_types = []
y = []
for i, node_type in enumerate(data.node_types):
if 'y' in data[node_type].keys():
labeled_node_types.append(i)
y.append(data[node_type].y)
else:
y.append(-1 * torch.ones(data[node_type].num_nodes))
if labeled_node_types:
return torch.cat(y)
|
31,219 | def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs on server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs on server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.success(f'Packs were successfully installed on server {host}')
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception as e:
logging.exception(f'The request to install packs has failed. Additional info: {str(e)}')
global SUCCESS_FLAG
SUCCESS_FLAG = False
finally:
return SUCCESS_FLAG
| def install_packs(client: demisto_client,
host: str,
packs_to_install: list,
request_timeout: int = 999999,
is_nightly: bool = False):
""" Make a packs installation request.
Args:
client (demisto_client): The configured client to use.
host (str): The server URL.
packs_to_install (list): A list of the packs to install.
request_timeout (int): Timeout settings for the installation request.
is_nightly (bool): Is the build nightly or not.
"""
if is_nightly:
install_nightly_packs(client, host, packs_to_install)
return
request_data = {
'packs': packs_to_install,
'ignoreWarnings': True
}
logging.info(f'Installing packs on server {host}')
packs_to_install_str = ', '.join([pack['id'] for pack in packs_to_install])
logging.debug(f'Installing the following packs on server {host}:\n{packs_to_install_str}')
# make the pack installation request
try:
response_data, status_code, _ = demisto_client.generic_request_func(client,
path='/contentpacks/marketplace/install',
method='POST',
body=request_data,
accept='application/json',
_request_timeout=request_timeout)
if 200 <= status_code < 300:
packs_data = [{'ID': pack.get('id'), 'CurrentVersion': pack.get('currentVersion')} for
pack in
ast.literal_eval(response_data)]
logging.success(f'Packs were successfully installed on server {host}')
logging.debug(f'The following packs were successfully installed on server {host}:\n{packs_data}')
else:
result_object = ast.literal_eval(response_data)
message = result_object.get('message', '')
raise Exception(f'Failed to install packs - with status code {status_code}\n{message}')
except Exception as e:
logging.exception(f'The request to install packs has failed. Additional info: {e.args[0]}')
global SUCCESS_FLAG
SUCCESS_FLAG = False
finally:
return SUCCESS_FLAG
|
16,573 | def _load_platform(hass, config, entity_type, entity_info_list):
"""Load platform with list of entity info."""
hass.async_create_task(
async_load_platform(hass, entity_type, DOMAIN, entity_info_list, config)
)
| def _load_platform(hass, config, entity_type, entity_info):
"""Load platform with list of entity info."""
hass.async_create_task(
        async_load_platform(hass, entity_type, DOMAIN, entity_info, config)
)
|
56,818 | def _can_delete_scheduled_report(report, user, domain):
if report.domain != domain:
return False
if user._id != report.owner_id and not user.is_domain_admin(domain):
return False
return True
| def _can_delete_scheduled_report(report, user, domain):
return (
report.domain == domain
and user._id == report.owner_id
and user.is_domain_admin(domain)
)
|
12,142 | def package_name_from_path(current_path, remove_uuid_suffix=False):
"""Return name of package without file extensions from current path.
This helper works for all package types (e.g. transfer, AIP, AIC, DIP).
:param current_path: Current path to package.
:param remove_uuid_suffix: Optional boolean to additionally remove UUID suffix.
:returns: Package name minus any file extensions.
"""
path = Path(current_path)
name, chars_to_remove = path.name, 0
if remove_uuid_suffix is True:
chars_to_remove = PACKAGE_UUID_SUFFIX_LENGTH
for suffix in reversed(path.suffixes):
if suffix not in PACKAGE_EXTENSIONS:
break
chars_to_remove += len(suffix)
# Check if we have characters to remove to avoid accidentally returning an
# empty string with name[:-0].
if not chars_to_remove > 0:
return name
return name[:-chars_to_remove]
| def package_name_from_path(current_path, remove_uuid_suffix=False):
"""Return name of package without file extensions from current path.
This helper works for all package types (e.g. transfer, AIP, AIC, DIP).
:param current_path: Current path to package.
:param remove_uuid_suffix: Optional boolean to additionally remove UUID suffix.
:returns: Package name minus any file extensions.
"""
path = Path(current_path)
name, chars_to_remove = path.name, 0
if remove_uuid_suffix is True:
chars_to_remove = PACKAGE_UUID_SUFFIX_LENGTH
for suffix in reversed(path.suffixes):
if suffix not in PACKAGE_EXTENSIONS:
break
chars_to_remove += len(suffix)
# Check if we have characters to remove to avoid accidentally returning an
# empty string with name[:-0].
if not chars_to_remove:
return name
return name[:-chars_to_remove]
|
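Illustrative calls for package_name_from_path; the module constants are not shown in this record, so the values below are assumptions chosen only to make the example run.

PACKAGE_EXTENSIONS = {".7z", ".tar", ".gz", ".bz2"}   # assumed values
PACKAGE_UUID_SUFFIX_LENGTH = 37                       # assumed: '-' plus a 36-character UUID

print(package_name_from_path("transfer-1.tar.gz"))
# -> 'transfer-1' (both recognised extensions stripped)
print(package_name_from_path(
    "transfer-7c3c8c3a-27d3-4a49-b1a3-0f5b1e2f9a11.7z", remove_uuid_suffix=True))
# -> 'transfer' (extension and UUID suffix stripped)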
53,781 | def test_mremove(network):
"""
GIVEN the AC DC exemplary pypsa network
WHEN two components of Generator are removed with mremove
THEN the generator dataframe and the time-dependent generator dataframe
should not contain the removed elements.
"""
generators = ['Manchester Wind', 'Frankfurt Wind']
network.mremove('Generator', generators)
assert generators not in network.generators.index.tolist()
assert generators not in network.generators_t.p_max_pu.columns.tolist()
| def test_mremove(network):
"""
GIVEN the AC DC exemplary pypsa network
WHEN two components of Generator are removed with mremove
THEN the generator dataframe and the time-dependent generator dataframe
should not contain the removed elements.
"""
generators = {'Manchester Wind', 'Frankfurt Wind'}
network.mremove('Generator', generators)
assert not generators.issubset(network.generators.index)
assert not generators.issubset(network.generators_t.p_max_pu.columns)
|
32,530 | def _main(args: Dict[str, Any]) -> CommandResults:
query = f'''name:"{args['playbook_name']}"'''
body = {
'query': query
}
playbooks_json = perform_rest_call('post', 'playbook/search', body)
for playbook_json in playbooks_json['playbooks']:
if playbook_json['name'] == args['playbook_name']:
break
append_to_playbooks_and_integrations(playbook_json['name'], playbook_json['brands'])
tasks = get_tasks_list(playbook_json['tasks'])
for task in tasks:
if task['type'] == 'playbook':
get_subplaybook_tasks(task)
outputs = {
'Playbooks': playbooks,
'Integrations': integrations
}
return CommandResults(
readable_output=f'''Retrieved Playbooks and Integrations for Playbook "{playbook_json['name']}"''',
outputs_prefix='RetrievePlaybooksAndIntegrations',
outputs_key_field='',
outputs=outputs,
)
| def retrieve_playbooks_and_integrations(args: Dict[str, Any]) -> CommandResults:
query = f'''name:"{args['playbook_name']}"'''
body = {
'query': query
}
playbooks_json = perform_rest_call('post', 'playbook/search', body)
for playbook_json in playbooks_json['playbooks']:
if playbook_json['name'] == args['playbook_name']:
break
append_to_playbooks_and_integrations(playbook_json['name'], playbook_json['brands'])
tasks = get_tasks_list(playbook_json['tasks'])
for task in tasks:
if task['type'] == 'playbook':
get_subplaybook_tasks(task)
outputs = {
'Playbooks': playbooks,
'Integrations': integrations
}
return CommandResults(
readable_output=f'''Retrieved Playbooks and Integrations for Playbook "{playbook_json['name']}"''',
outputs_prefix='RetrievePlaybooksAndIntegrations',
outputs_key_field='',
outputs=outputs,
)
|
42,924 | def c_1(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_1` of nodes that are connected to all but one of the nodes in
the input clique subgraph
The set :math:`C_1` is defined in :cite:`pullan2006phased` and is used to determine outside
nodes that can be swapped with clique nodes to create a new clique.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.wheel_graph(5)
>>> subgraph = [0, 1, 2] # these nodes form a clique
>>> utils.c_1(subgraph, graph)
[(1, 3), (2, 4)]
Args:
clique (list[int]): A subgraph specified by a list of nodes; the subgraph must be a clique
graph (nx.Graph): the input graph
Returns:
list[int]: A list of tuples. The first node in the tuple is the node in the clique and the
second node is the outside node it can be swapped with.
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_1_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
neighbors_in_subgraph = clique.intersection(graph.neighbors(i))
if len(neighbors_in_subgraph) == len(clique) - 1:
to_swap = clique - neighbors_in_subgraph
(i_clique,) = to_swap
c_1_nodes.append((i_clique, i))
return c_1_nodes
| def c_1(clique: list, graph: nx.Graph):
"""Generates the set :math:`C_1` of nodes that are connected to all but one of the nodes in
the input clique subgraph
The set :math:`C_1` is defined in :cite:`pullan2006phased` and is used to determine outside
nodes that can be swapped with clique nodes to create a new clique.
Example usage:
.. code-block::
>>> from strawberryfields.apps.graph import utils
>>> import networkx as nx
>>> graph = nx.wheel_graph(5)
>>> subgraph = [0, 1, 2] # these nodes form a clique
>>> utils.c_1(subgraph, graph)
[(1, 3), (2, 4)]
Args:
clique (list[int]): a subgraph specified by a list of nodes; the subgraph must be a clique
graph (nx.Graph): the input graph
Returns:
list[int]: A list of tuples. The first node in the tuple is the node in the clique and the
second node is the outside node it can be swapped with.
"""
if not is_clique(graph.subgraph(clique)):
raise ValueError("Input subgraph is not a clique")
clique = set(clique)
c_1_nodes = []
non_clique_nodes = set(graph.nodes) - clique
for i in non_clique_nodes:
neighbors_in_subgraph = clique.intersection(graph.neighbors(i))
if len(neighbors_in_subgraph) == len(clique) - 1:
to_swap = clique - neighbors_in_subgraph
(i_clique,) = to_swap
c_1_nodes.append((i_clique, i))
return c_1_nodes
|
31,497 | def test_module(client: Client, client_body: Dict, full_url) -> str:
try:
# testing a known malicious URL to check if we get matches var
test_url = "http://testsafebrowsing.appspot.com/apiv4/ANY_PLATFORM/MALWARE/URL/"
body = build_request_body(client_body, test_url)
res = client.url_request(body, full_url)
if res.get('matches'):
message = 'ok'
else:
            message = 'Error querying Google Safe Browsing. Expected matching response, but received none'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return message
| def test_module(client: Client, client_body: Dict, full_url) -> str:
try:
# testing a known malicious URL to check if we get matches
test_url = "http://testsafebrowsing.appspot.com/apiv4/ANY_PLATFORM/MALWARE/URL/"
body = build_request_body(client_body, test_url)
res = client.url_request(body, full_url)
if res.get('matches'):
message = 'ok'
else:
            message = 'Error querying Google Safe Browsing. Expected matching response, but received none'
except DemistoException as e:
if 'Forbidden' in str(e) or 'Authorization' in str(e):
message = 'Authorization Error: make sure API Key is correctly set'
else:
raise e
return message
|
24,534 | def test_split_populations():
"""
This test makes sure that splitting a single population of ions or electrons
into two identical halves returns the same result.
"""
wavelengths = np.arange(520, 545, 0.01) * u.nm
probe_wavelength = 532 * u.nm
n = 5e17 * u.cm ** -3
probe_vec = np.array([1, 0, 0])
scatter_vec = np.array([0, 1, 0])
# Combined
Te = np.array([10]) * u.eV
Ti = np.array([10]) * u.eV
ion_species = ["H+"]
ifract = np.array([1.0])
efract = np.array([1.0])
alpha, Skw0 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
efract=efract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Split e and i populations into two parts
# this should not change the results since the parts are identical
Te = np.array([10, 10]) * u.eV
Ti = np.array([10, 10]) * u.eV
ion_species = ["H+", "H+"]
ifract = np.array([0.2, 0.8])
efract = np.array([0.8, 0.2])
alpha, Skw1 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
efract=efract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Calculate the deviation between the two spectra
# (any differences should be in the noise)
deviation = (Skw0 - Skw1) / Skw0 * 100
assert all(deviation < 1e-6), "Failed split populations teset"
| def test_split_populations():
"""
This test makes sure that splitting a single population of ions or electrons
into two identical halves returns the same result.
"""
wavelengths = np.arange(520, 545, 0.01) * u.nm
probe_wavelength = 532 * u.nm
n = 5e17 * u.cm ** -3
probe_vec = np.array([1, 0, 0])
scatter_vec = np.array([0, 1, 0])
# Combined
Te = np.array([10]) * u.eV
Ti = np.array([10]) * u.eV
ion_species = ["H+"]
ifract = np.array([1.0])
efract = np.array([1.0])
alpha, Skw0 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
efract=efract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Split e and i populations into two parts
# this should not change the results since the parts are identical
Te = np.array([10, 10]) * u.eV
Ti = np.array([10, 10]) * u.eV
ion_species = ["H+", "H+"]
ifract = np.array([0.2, 0.8])
efract = np.array([0.8, 0.2])
alpha, Skw1 = thomson.spectral_density(
wavelengths,
probe_wavelength,
n,
Te,
Ti,
ifract=ifract,
efract=efract,
ion_species=ion_species,
probe_vec=probe_vec,
scatter_vec=scatter_vec,
)
# Calculate the deviation between the two spectra
# (any differences should be in the noise)
deviation = (Skw0 - Skw1) / Skw0 * 100
assert np.all(deviation < 1e-6), "Failed split populations test"
|
43,563 | def AngleEmbedding(features, wires, rotation='X'):
r"""
Encodes :math:`N` features into the rotation angles of :math:`n` qubits, where :math:`N \leq n`.
The rotations can be chosen as either :class:`~pennylane.ops.RX`, :class:`~pennylane.ops.RY`
or :class:`~pennylane.ops.RZ` gates, as defined by the ``rotation`` parameter:
* ``rotation='X'`` uses the features as angles of RX rotations
* ``rotation='Y'`` uses the features as angles of RY rotations
* ``rotation='Z'`` uses the features as angles of RZ rotations
The length of ``features`` has to be smaller or equal to the number of qubits. If there are fewer entries in
``features`` than rotations, the circuit does not apply the remaining rotation gates.
This embedding method can also be used to encode a binary sequence into a basis state. For example, to prepare
basis state :math:`|0,1,1,0\rangle`, choose ``rotation='X'`` and use the
feature vector :math:`[0, \pi/2, \pi/2, 0]`. Alternatively, one can use the :mod:`BasisEmbedding()` template.
Args:
features (array): Input array of shape ``(N,)``, where N is the number of input features to embed,
with :math:`N\leq n`
wires (Sequence[int]): sequence of qubit indices that the template acts on
Keyword Args:
rotation (str): Type of rotations used
Raises:
ValueError: if `features` or `wires` is invalid
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(features) > len(wires):
raise ValueError("Number of features to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
if rotation == 'X':
for f, w in zip(features, wires):
RX(f, wires=w)
elif rotation == 'Y':
for f, w in zip(features, wires):
RY(f, wires=w)
elif rotation == 'Z':
for f, w in zip(features, wires):
RZ(f, wires=w)
else:
raise ValueError("Rotation has to be `X`, `Y` or `Z`; got {}.".format(rotation))
| def AngleEmbedding(features, wires, rotation='X'):
r"""
Encodes :math:`N` features into the rotation angles of :math:`n` qubits, where :math:`N \leq n`.
The rotations can be chosen as either :class:`~pennylane.ops.RX`, :class:`~pennylane.ops.RY`
or :class:`~pennylane.ops.RZ` gates, as defined by the ``rotation`` parameter:
* ``rotation='X'`` uses the features as angles of RX rotations
* ``rotation='Y'`` uses the features as angles of RY rotations
* ``rotation='Z'`` uses the features as angles of RZ rotations
The length of ``features`` has to be smaller or equal to the number of qubits. If there are fewer entries in
``features`` than rotations, the circuit does not apply the remaining rotation gates.
This embedding method can also be used to encode a binary sequence into a basis state. For example, to prepare
basis state :math:`|0,1,1,0\rangle`, choose ``rotation='X'`` and use the
feature vector :math:`[0, \pi/2, \pi/2, 0]`. Alternatively, one can use the :mod:`BasisEmbedding()` template.
Args:
features (array): Input array of shape ``(N,)``, where N is the number of input features to embed,
with :math:`N\leq n`
wires (Sequence[int]): sequence of qubit indices that the template acts on
Keyword Args:
rotation (str): Type of rotations used
Raises:
ValueError: if ``features`` or ``wires`` is invalid
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(features) > len(wires):
raise ValueError("Number of features to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
if rotation == 'X':
for f, w in zip(features, wires):
RX(f, wires=w)
elif rotation == 'Y':
for f, w in zip(features, wires):
RY(f, wires=w)
elif rotation == 'Z':
for f, w in zip(features, wires):
RZ(f, wires=w)
else:
raise ValueError("Rotation has to be `X`, `Y` or `Z`; got {}.".format(rotation))
|
1,413 | def _adds_deprecated_submodules():
for module, deprecated_path, correct_path in _DEPRECATED_MODULES:
deprecated_content = _DEPRECATE_TEMPLATE.format(
module=module, deprecated_path=deprecated_path,
correct_path=correct_path)
deprecated_parts = deprecated_path.split(".")
deprecated_parts[-1] = deprecated_parts[-1] + ".py"
with Path(*deprecated_parts).open('w') as f:
f.write(deprecated_content)
| def _add_deprecated_submodules():
for module, deprecated_path, correct_path in _DEPRECATED_MODULES:
deprecated_content = _DEPRECATE_TEMPLATE.format(
module=module, deprecated_path=deprecated_path,
correct_path=correct_path)
deprecated_parts = deprecated_path.split(".")
deprecated_parts[-1] = deprecated_parts[-1] + ".py"
with Path(*deprecated_parts).open('w') as f:
f.write(deprecated_content)
|
3,398 | def rename_issue_widget_query_fields(apps, schema_editor):
DashboardWidgetQuery = apps.get_model("sentry", "DashboardWidgetQuery")
old_to_new_field_mapping = {
"count": "events",
"userCount": "users",
"lifetimeCount": "lifetimeEvents",
"lifetimeUserCount": "lifetimeUsers",
}
for query in RangeQuerySetWrapperWithProgressBar(DashboardWidgetQuery.objects.all()):
fields = getattr(query, "fields")
new_fields = map(
lambda field: field
if field not in old_to_new_field_mapping.keys()
else old_to_new_field_mapping[field],
fields,
)
if fields != new_fields:
query.fields = new_fields
query.save()
| def rename_issue_widget_query_fields(apps, schema_editor):
DashboardWidgetQuery = apps.get_model("sentry", "DashboardWidgetQuery")
old_to_new_field_mapping = {
"count": "events",
"userCount": "users",
"lifetimeCount": "lifetimeEvents",
"lifetimeUserCount": "lifetimeUsers",
}
for query in RangeQuerySetWrapperWithProgressBar(DashboardWidgetQuery.objects.all()):
fields = getattr(query, "fields")
new_fields = [old_to_new_field_mapping.get(field, field) for field in fields]
if fields != new_fields:
query.fields = new_fields
query.save()
|
8,243 | def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinate and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
intensity : `~astropy.units.Quantity`
loop_coord : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
raise ValueError('At least one coordinate is not within the bounds of the map.'
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
| def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinates and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
intensity : `~astropy.units.Quantity`
loop_coord : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
raise ValueError('At least one coordinate is not within the bounds of the map.'
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
|
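_bresenham is not shown in this record; a standard integer Bresenham sketch with the same call shape (x1, y1, x2, y2 -> array of pixel index pairs) might look like this.

import numpy as np

def _bresenham_sketch(x1, y1, x2, y2):
    """Return the integer pixel coordinates on the line from (x1, y1) to (x2, y2)."""
    points = []
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    x, y = x1, y1
    while True:
        points.append((x, y))
        if x == x2 and y == y2:
            break
        e2 = 2 * err
        if e2 > -dy:
            err -= dy
            x += sx
        if e2 < dx:
            err += dx
            y += sy
    return np.array(points)

print(_bresenham_sketch(0, 0, 4, 2))
# [[0 0] [1 0] [2 1] [3 1] [4 2]]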
15,501 | def run_callback_threadsafe(
loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> "concurrent.futures.Future[T]":
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
future: concurrent.futures.Future = concurrent.futures.Future()
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
if hasattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE):
#
# If the final `HomeAssistant.async_block_till_done` in
# `HomeAssistant.async_stop` has already been called, the callback
# will never run and, `future.result()` will block forever which
# will prevent the thread running this code from shutting down which
# will result in a deadlock when the main thread attempts to shutdown
# the executor and `.join()` the thread running this code.
#
# To prevent this deadlock we do the following on shutdown:
#
# 1. Set the _SHUTDOWN_RUN_CALLBACK_THREADSAFE attr on this function
# by calling `shutdown_run_callback_threadsafe`
# 2. Call `hass.async_block_till_done` at least once after shutdown
# to ensure all callbacks have run
# 3. Raise an exception here to ensure `future.result()` can never be
# called and hit the deadlock since once `shutdown_run_callback_threadsafe`
# we cannot promise the callback will be executed.
#
future.cancel()
raise RuntimeError("Shutdown occurred")
return future
| def run_callback_threadsafe(
loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> "concurrent.futures.Future[T]":
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
future: concurrent.futures.Future = concurrent.futures.Future()
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
if hasattr(loop, _SHUTDOWN_RUN_CALLBACK_THREADSAFE):
#
# If the final `HomeAssistant.async_block_till_done` in
# `HomeAssistant.async_stop` has already been called, the callback
# will never run and, `future.result()` will block forever which
# will prevent the thread running this code from shutting down which
# will result in a deadlock when the main thread attempts to shutdown
# the executor and `.join()` the thread running this code.
#
# To prevent this deadlock we do the following on shutdown:
#
# 1. Set the _SHUTDOWN_RUN_CALLBACK_THREADSAFE attr on this function
# by calling `shutdown_run_callback_threadsafe`
# 2. Call `hass.async_block_till_done` at least once after shutdown
# to ensure all callbacks have run
# 3. Raise an exception here to ensure `future.result()` can never be
# called and hit the deadlock since once `shutdown_run_callback_threadsafe`
# we cannot promise the callback will be executed.
#
future.cancel()
raise RuntimeError("The event loop is shutting down and cannot accept new callbacks.")
return future
|
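A hedged usage sketch for the run_callback_threadsafe record above, assuming the helper and its module-level constants/logger are importable; it drives a plain asyncio loop from a worker thread and submits a callback from the main thread.
import asyncio
import threading
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()
# Submit work to the loop from this (non-loop) thread and wait for the result
future = run_callback_threadsafe(loop, lambda x, y: x + y, 2, 3)
print(future.result(timeout=5))  # -> 5
loop.call_soon_threadsafe(loop.stop)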
20,018 | def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
    mask: Binary mask image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(-1, n_clusters_ + 1):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w, 3), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
| def spatial_clustering(mask, algorithm="OPTICS", min_cluster_size=5, max_distance=0, njobs=-1):
"""Counts and segments portions of an image based on distance between two pixels.
Masks showing all clusters, plus masks of individual clusters, are returned.
    mask: Binary mask image to segment.
Algorithm: Algorithm to use for segregating different clusters.
Currently supporting OPTICS and DBSCAN. (Default="OPTICS")
min_cluster_size: The minimum size a section of a mask must be (in pixels)
before it can be considered its own cluster. (Default=5)
max_distance: The total distance between two pixels for them to be considered a part
of the same cluster. For the DBSCAN algorithm, value must be between
0 and 1. For OPTICS, the value is in pixels and depends on the size
of your picture. (Default=0)
njobs: The number of processors to use for calculation of the clusters.
Default is all available processors.
"""
al_upper = algorithm.upper()
if "OPTICS" in al_upper:
max_distance = np.inf
elif "DBSCAN" in al_upper:
max_distance = 0.2
else:
raise NameError("Please use only 'OPTICS' or 'DBSCAN' ")
if not max_distance == 0:
max_distance = max_distance
vis = mask
backtorgb = cv2.cvtColor(vis, cv2.COLOR_GRAY2RGB)
x, y = np.where(np.all(backtorgb == [255, 255, 255], axis=2))
zipped = np.column_stack((x, y))
if "OPTICS" in al_upper:
scaled = StandardScaler(with_mean=False, with_std=False).fit_transform(zipped)
db = OPTICS(max_eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
elif "DBSCAN" in al_upper:
scaled = StandardScaler().fit_transform(zipped)
db = DBSCAN(eps=max_distance, min_samples=min_cluster_size, n_jobs=njobs).fit(scaled)
n_clusters_ = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
colors = color_palette(n_clusters_ + 1)
dict_of_colors = {}
sub_mask = []
h, w = backtorgb.shape[:2]
image = np.zeros((h, w, 3), np.uint8)
for y in range(0, n_clusters_):
dict_of_colors[str(y)] = colors[y]
for y in range(0, n_clusters_):
sub_mask.append(np.zeros((h, w, 3), np.uint8))
dict_of_colors[str(-1)] = (255, 255, 255)
for z in range(0, len(db.labels_)):
if not db.labels_[z] == -1:
sub_mask[db.labels_[z]][zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
image[zipped[z][0], zipped[z][1]] = (dict_of_colors[str(db.labels_[z])][2],
dict_of_colors[str(db.labels_[z])][1],
dict_of_colors[str(db.labels_[z])][0])
params.device += 1
if params.debug == 'print':
print_image(image, "full_image_mask.png")
for c in range(0, len(sub_mask)):
print_image(sub_mask[c], "subimage_cluster_" + str(c) + ".png")
elif params.debug == 'plot':
plot_image(image)
for c in range(0, len(sub_mask)):
plot_image(sub_mask[c])
return image, sub_mask
|
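An illustrative call for the spatial_clustering record above on a synthetic two-blob mask; it assumes the PlantCV helpers the function references (params, color_palette, print_image, plot_image) plus cv2 and scikit-learn are available in scope.
import numpy as np
# Binary mask with two well-separated white (255) blobs
mask = np.zeros((100, 100), dtype=np.uint8)
mask[10:20, 10:20] = 255
mask[70:85, 60:80] = 255
full_img, cluster_masks = spatial_clustering(mask, algorithm="DBSCAN",
                                             min_cluster_size=5, max_distance=0)
print(len(cluster_masks))  # expected: 2 sub-masks, one per blob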
54,470 | def _generate_slice_subplot(
trials: List[FrozenTrial],
param: str,
ax: "Axes",
cmap: "Colormap",
padding_ratio: float,
obj_values: List[Union[int, float]],
) -> "PathCollection":
x_values = []
y_values = []
trial_numbers = []
for t in trials:
if param in t.params:
x_values.append(t.params[param])
y_values.append(obj_values[t.number])
trial_numbers.append(t.number)
ax.set(xlabel=param, ylabel="Objective Value")
if _is_log_scale(trials, param):
ax.set_xscale("log")
xlim = _calc_lim_with_padding(x_values, padding_ratio, True)
ax.set_xlim(xlim[0], xlim[1])
else:
xlim = _calc_lim_with_padding(x_values, padding_ratio)
ax.set_xlim(xlim[0], xlim[1])
sc = ax.scatter(x_values, y_values, c=trial_numbers, cmap=cmap, edgecolors="grey")
ax.label_outer()
return sc
| def _generate_slice_subplot(
trials: List[FrozenTrial],
param: str,
ax: "Axes",
cmap: "Colormap",
padding_ratio: float,
obj_values: List[Union[int, float]],
) -> "PathCollection":
x_values = []
y_values = []
trial_numbers = []
for t, obj_v in zip(trials, obj_values):
if param in t.params:
x_values.append(t.params[param])
y_values.append(obj_v)
trial_numbers.append(t.number)
ax.set(xlabel=param, ylabel="Objective Value")
if _is_log_scale(trials, param):
ax.set_xscale("log")
xlim = _calc_lim_with_padding(x_values, padding_ratio, True)
ax.set_xlim(xlim[0], xlim[1])
else:
xlim = _calc_lim_with_padding(x_values, padding_ratio)
ax.set_xlim(xlim[0], xlim[1])
sc = ax.scatter(x_values, y_values, c=trial_numbers, cmap=cmap, edgecolors="grey")
ax.label_outer()
return sc
|
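A sketch of how the _generate_slice_subplot record above is typically driven, assuming Optuna and Matplotlib are installed and that the private helpers it calls (_is_log_scale, _calc_lim_with_padding) are in scope; the toy objective is illustrative only.
import optuna
import matplotlib.pyplot as plt
import matplotlib.cm as cm
study = optuna.create_study()
study.optimize(lambda t: t.suggest_float("x", -10, 10) ** 2, n_trials=20)
fig, ax = plt.subplots()
trials = study.trials
obj_values = [t.value for t in trials]
sc = _generate_slice_subplot(trials, "x", ax, cm.Blues, 0.05, obj_values)
fig.colorbar(sc, ax=ax, label="Trial number")
plt.show()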
17,533 | def load_releases_tags():
tags = []
tags_fastcomp = []
info = load_releases_info()
for version, sha in sorted(info['releases'].items(), key=lambda x: version_key(x[0])):
tags.append(sha)
# Only include versions older than 1.39.0 in fastcomp releases
if version_key(version) < (2, 0, 0):
tags_fastcomp.append(sha)
if extra_release_tag:
tags.append(extra_release_tag)
# Explicitly add the currently installed SDK version. This could be a custom
# version (installed explicitly) so might be part of the main list loaded above.
version_file = sdk_path(os.path.join('upstream', '.emsdk_version'))
if os.path.exists(version_file):
with open(version_file) as f:
version = f.read()
version = version.split('-')[2]
if version not in tags:
tags.append(version)
return tags, tags_fastcomp
| def load_releases_tags():
tags = []
tags_fastcomp = []
info = load_releases_info()
for version, sha in sorted(info['releases'].items(), key=lambda x: version_key(x[0])):
tags.append(sha)
# Only include versions older than 1.39.0 in fastcomp releases
if version_key(version) < (2, 0, 0):
tags_fastcomp.append(sha)
if extra_release_tag:
tags.append(extra_release_tag)
# Explicitly add the currently installed SDK version. This could be a custom
# version (installed explicitly) so it might not be part of the main list loaded above.
version_file = sdk_path(os.path.join('upstream', '.emsdk_version'))
if os.path.exists(version_file):
with open(version_file) as f:
version = f.read()
version = version.split('-')[2]
if version not in tags:
tags.append(version)
return tags, tags_fastcomp
|
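The `< (2, 0, 0)` comparison in the record above assumes version_key turns a version string into a comparable tuple; a hypothetical stand-in (not emsdk's actual helper, which also handles suffixes and short versions) behaves like this:
def version_key(version_string):
    # hypothetical: split "X.Y.Z" into an integer tuple for ordering
    return tuple(int(part) for part in version_string.split('.'))
print(version_key('1.39.20') < (2, 0, 0))  # True  -> listed among fastcomp tags
print(version_key('2.0.14') < (2, 0, 0))   # False -> upstream-only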
10,827 | def _legalize(module, dmm, fndesc):
"""
Legalize the code in the module.
Returns True if the module is legal for the rewrite pass that remove
unnecessary refcount.
"""
def valid_output(ty):
"""
Valid output are any type that does not need refcount
"""
model = dmm[ty]
return not model.contains_nrt_meminfo()
def valid_input(ty):
"""
Valid input are any type that does not need refcount except Array.
"""
return valid_output(ty) or isinstance(ty, types.Array)
# Ensure no reference to function marked as
# "numba_args_may_always_need_nrt"
try:
nmd = module.get_named_metadata("numba_args_may_always_need_nrt")
except KeyError:
# Nothing marked
pass
else:
# Has functions marked as "numba_args_may_always_need_nrt"
if len(nmd.operands) > 0:
# The pass is illegal for this compilation unit.
return False
    # More legalization based on function type
argtypes = fndesc.argtypes
restype = fndesc.restype
calltypes = fndesc.calltypes
# Legalize function arguments
for argty in argtypes:
if not valid_input(argty):
return False
# Legalize function return
if not valid_output(restype):
return False
# Legalize all called functions
for callty in calltypes.values():
if callty is not None and not valid_output(callty.return_type):
return False
# Ensure no allocation
for fn in module.functions:
if fn.name.startswith("NRT_"):
if fn.name not in _accepted_nrtfns:
return False
return True
| def _legalize(module, dmm, fndesc):
"""
Legalize the code in the module.
Returns True if the module is legal for the rewrite pass that removes
unnecessary refcount.
"""
def valid_output(ty):
"""
Valid output are any type that does not need refcount
"""
model = dmm[ty]
return not model.contains_nrt_meminfo()
def valid_input(ty):
"""
Valid input are any type that does not need refcount except Array.
"""
return valid_output(ty) or isinstance(ty, types.Array)
# Ensure no reference to function marked as
# "numba_args_may_always_need_nrt"
try:
nmd = module.get_named_metadata("numba_args_may_always_need_nrt")
except KeyError:
# Nothing marked
pass
else:
# Has functions marked as "numba_args_may_always_need_nrt"
if len(nmd.operands) > 0:
# The pass is illegal for this compilation unit.
return False
    # More legalization based on function type
argtypes = fndesc.argtypes
restype = fndesc.restype
calltypes = fndesc.calltypes
# Legalize function arguments
for argty in argtypes:
if not valid_input(argty):
return False
# Legalize function return
if not valid_output(restype):
return False
# Legalize all called functions
for callty in calltypes.values():
if callty is not None and not valid_output(callty.return_type):
return False
# Ensure no allocation
for fn in module.functions:
if fn.name.startswith("NRT_"):
if fn.name not in _accepted_nrtfns:
return False
return True
|
30,086 | def test_do_sourmash_compute_multik_protein_input_bad_ksize():
with utils.TempDirectory() as location:
testdata1 = utils.get_test_data('short-protein.fa')
status, out, err = utils.runscript('sourmash',
['compute', '-k', '20,32',
'--protein', '--no-dna',
'--input-is-protein',
testdata1],
in_directory=location,
fail_ok=True)
outfile = os.path.join(location, 'short-protein.fa.sig')
assert not os.path.exists(outfile)
assert 'protein ksizes must be divisible by 3' in err
| def test_do_sourmash_compute_multik_protein_input_bad_ksize():
with utils.TempDirectory() as location:
testdata1 = utils.get_test_data('short-protein.fa')
status, out, err = utils.runscript('sourmash',
['compute', '-k', '20,32',
'--protein', '--no-dna',
'--input-is-protein',
testdata1],
in_directory=location,
fail_ok=True)
outfile = os.path.join(location, 'short-protein.fa.sig')
assert status != 0
assert 'protein ksizes must be divisible by 3' in err
|
11,170 | def _path(route, view, kwargs=None, name=None, Pattern=None):
from django.views import View
if kwargs is not None and not isinstance(kwargs, dict):
raise TypeError(
f'Keyword arguments must be dict, not {type(kwargs).__name__}. '
f'Hint: You are probably trying to pass the URL name as a positional argument.'
)
if isinstance(view, (list, tuple)):
# For include(...) processing.
pattern = Pattern(route, is_endpoint=False)
urlconf_module, app_name, namespace = view
return URLResolver(
pattern,
urlconf_module,
kwargs,
app_name=app_name,
namespace=namespace,
)
elif callable(view):
pattern = Pattern(route, name=name, is_endpoint=True)
return URLPattern(pattern, view, kwargs, name)
elif isinstance(view, View):
view_cls_name = view.__class__.__name__
raise TypeError(
f'view must be a callable, pass {view_cls_name}.as_view(), not '
f'{view_cls_name}().'
)
else:
raise TypeError('view must be a callable or a list/tuple in the case of include().')
| def _path(route, view, kwargs=None, name=None, Pattern=None):
from django.views import View
if not isinstance(kwargs, dict):
raise TypeError(
f'Keyword arguments must be dict, not {type(kwargs).__name__}. '
f'Hint: You are probably trying to pass the URL name as a positional argument.'
)
if isinstance(view, (list, tuple)):
# For include(...) processing.
pattern = Pattern(route, is_endpoint=False)
urlconf_module, app_name, namespace = view
return URLResolver(
pattern,
urlconf_module,
kwargs,
app_name=app_name,
namespace=namespace,
)
elif callable(view):
pattern = Pattern(route, name=name, is_endpoint=True)
return URLPattern(pattern, view, kwargs, name)
elif isinstance(view, View):
view_cls_name = view.__class__.__name__
raise TypeError(
f'view must be a callable, pass {view_cls_name}.as_view(), not '
f'{view_cls_name}().'
)
else:
raise TypeError('view must be a callable or a list/tuple in the case of include().')
|
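The checks in the _path record above surface through Django's public path() helper (built on _path via functools.partial); a hedged sketch of accepted and rejected call shapes within a Django project:
from django.urls import path
from django.views import View
class HomeView(View):
    pass
urlpatterns = [
    path("home/", HomeView.as_view(), name="home"),   # OK: callable view
    # path("home/", HomeView(), name="home")          # TypeError: pass HomeView.as_view(), not HomeView()
    # path("home/", HomeView.as_view(), "home")       # TypeError: kwargs must be a dict, not str
]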
45,132 | def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the flipper.bucketing module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
| def create_if_missing(
flag_name: str,
is_enabled: bool = False,
client_data: Optional[dict] = None,
bucketer: Optional[AbstractBucketer] = None,
conditions: Optional[Iterable[Condition]] = None,
client: FeatureFlagClient = None,
) -> Optional[FeatureFlag]:
"""
Create a feature flag if a flag matching the given name does not
already exist.
Args:
flag_name: the name of the feature flag
is_enabled: the initial enabled/disabled state of the flag if
this function creates it
client_data: arbitrary data that we should store with the flag
bucketer: an optional bucketer from the `flipper.bucketing` module, e.g.
PercentageBucketer, to use when determining if the flag
is enabled
conditions: an optional iterable of Conditions against which we will
check input data to determine if a flag is enabled
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
FeatureFlag or None: Returns a created or existing FeatureFlag, or None
if feature flagging is disabled.
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return
if not client:
client = get_features_client()
# If the flag exists in the feature flag store, we'll consider the
# enabled state, bucketer, and conditions currently saved in the
# feature flag store as canonical.
if client.exists(flag_name):
return client.get(flag_name)
flag = client.create(flag_name, is_enabled=is_enabled, client_data=client_data)
if bucketer:
flag.set_bucketer(bucketer)
if conditions:
flag.set_conditions(conditions)
return flag
|
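A usage sketch for the create_if_missing record above, assuming Prefect's feature-flagging setting is enabled; the flag name and client_data are purely illustrative, and is_enabled() is assumed from flipper's FeatureFlag API.
flag = create_if_missing(
    "use-new-orchestration-path",           # hypothetical flag name
    is_enabled=True,
    client_data={"owner": "platform-team"},
)
if flag is not None:                        # None when feature flagging is disabled
    print(flag.is_enabled())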
46,318 | def show_plugin_info():
from .plugins import _initialize_plugins, _npe2, plugin_manager
# if a plugin widget has been requested, this will fail immediately
# if the requested plugin/widget is not available.
_initialize_plugins()
plugin_manager.discover_widgets()
shared_dict = {}
for _key, (_plugin_name, _widget_names) in _npe2.widget_iterator():
shared_dict.setdefault(_key, {}).setdefault('npe2', []).append(
(_plugin_name, _widget_names)
)
for _key, (_plugin_name, _widget_dict) in plugin_manager.iter_widgets():
shared_dict.setdefault(_key, []).setdefault(
'plugin_manager', []
).append((_plugin_name, list(_widget_dict.keys())))
print('Available plugins')
for key, value in shared_dict.items():
for plugin_type, plugins in value.items():
print(f"\nTYPE: {key} - {plugin_type.upper()}")
for pname, wnames in plugins:
print(f" PLUGIN_NAME: '{pname}'")
for wname in wnames:
print(f" WIDGET_NAME: '{wname}'")
| def show_plugin_info():
from .plugins import _initialize_plugins, _npe2, plugin_manager
# if a plugin widget has been requested, this will fail immediately
# if the requested plugin/widget is not available.
_initialize_plugins()
plugin_manager.discover_widgets()
shared_dict = {}
for _key, (_plugin_name, _widget_names) in _npe2.widget_iterator():
shared_dict.setdefault(_key, {}).setdefault('npe2', []).append(
(_plugin_name, _widget_names)
)
for _key, (_plugin_name, _widget_dict) in plugin_manager.iter_widgets():
shared_dict.setdefault(_key, {}).setdefault(
'plugin_manager', []
).append((_plugin_name, list(_widget_dict.keys())))
print('Available plugins')
for key, value in shared_dict.items():
for plugin_type, plugins in value.items():
print(f"\nTYPE: {key} - {plugin_type.upper()}")
for pname, wnames in plugins:
print(f" PLUGIN_NAME: '{pname}'")
for wname in wnames:
print(f" WIDGET_NAME: '{wname}'")
|
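The change from a list default to a dict default in the record above matters because dict.setdefault() returns the stored default, and chaining a second setdefault() only works when that default is itself a dict; a standalone illustration:
shared = {}
shared.setdefault('writer', {}).setdefault('plugin_manager', []).append(('napari-svg', ['SVG']))
print(shared)   # {'writer': {'plugin_manager': [('napari-svg', ['SVG'])]}}
# With a list default, the chained setdefault() raises immediately:
# broken = {}
# broken.setdefault('writer', []).setdefault('plugin_manager', [])  # AttributeError: 'list' object has no attribute 'setdefault'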
28,242 | def from_dict_to_native(dct: RunDescriberDicts) -> SomeRunDescriber:
"""
Convert a dict (usually coming from json.loads) into a RunDescriber
object according to the version specified in the dict
"""
dct_version = dct['version']
if dct_version == 0:
return v0.RunDescriber._from_dict(cast(RunDescriberV0Dict, dct))
elif dct_version == 1:
return current.RunDescriber._from_dict(cast(RunDescriberV1Dict, dct))
else:
raise RuntimeError()
| def from_dict_to_native(dct: RunDescriberDicts) -> SomeRunDescriber:
"""
Convert a dict (usually coming from json.loads) into a RunDescriber
object according to the version specified in the dict
"""
dct_version = dct['version']
if dct_version == 0:
return v0.RunDescriber._from_dict(cast(RunDescriberV0Dict, dct))
elif dct_version == 1:
return current.RunDescriber._from_dict(cast(RunDescriberV1Dict, dct))
else:
raise RuntimeError(f"Unknown version of run describer dictionary, can't deserialize. The dictionary is {dct!r}")
|
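The reworded error in the record above is easiest to see with an unrecognised version; a minimal sketch (the dict is deliberately not a full RunDescriber payload, since the function dispatches on 'version' before touching anything else):
try:
    from_dict_to_native({'version': 99})
except RuntimeError as err:
    print(err)   # names the unknown version and echoes the offending dict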
8,523 | def owner_org_validator(key: FlattenKey, data: FlattenDataDict,
errors: FlattenErrorDict, context: Context) -> Any:
"""Validate organization for the dataset.
Depending on the settings and user's permissions, this validator checks
whether organization is optional and ensures that specified organization
can be set as an owher of dataset.
"""
value = data.get(key)
if value is missing or value is None:
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
data.pop(key, None)
raise df.StopOnError
model = context['model']
user = model.User.get(context['user'])
package = context.get('package')
if value == '':
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
return
if (authz.check_config_permission('allow_dataset_collaborators')
and not authz.check_config_permission('allow_collaborators_to_change_owner_org')):
if package and user and not user.sysadmin:
is_collaborator = authz.user_is_collaborator_on_dataset(
user.id, package.id, ['admin', 'editor'])
if is_collaborator:
# User is a collaborator, check if it's also a member with
# edit rights of the current organization (redundant, but possible)
user_orgs = logic.get_action(
'organization_list_for_user')(
{'ignore_auth': True}, {'id': user.id, 'permission': 'update_dataset'})
user_is_org_member = package.owner_org in [org['id'] for org in user_orgs]
if data.get(key) != package.owner_org and not user_is_org_member:
raise Invalid(_('You cannot move this dataset to another organization'))
group = model.Group.get(value)
if not group:
raise Invalid(_('Organization does not exist'))
group_id = group.id
if not package or (package and package.owner_org != group_id):
# This is a new dataset or we are changing the organization
if not context.get(u'ignore_auth', False) and (not user or not(
user.sysadmin or authz.has_user_permission_for_group_or_org(
group_id, user.name, 'create_dataset'))):
raise Invalid(_('You cannot add a dataset to this organization'))
data[key] = group_id
| def owner_org_validator(key: FlattenKey, data: FlattenDataDict,
errors: FlattenErrorDict, context: Context) -> Any:
"""Validate organization for the dataset.
Depending on the settings and user's permissions, this validator checks
whether organization is optional and ensures that specified organization
can be set as an owner of dataset.
"""
value = data.get(key)
if value is missing or value is None:
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
data.pop(key, None)
raise df.StopOnError
model = context['model']
user = model.User.get(context['user'])
package = context.get('package')
if value == '':
if not authz.check_config_permission('create_unowned_dataset'):
raise Invalid(_('An organization must be provided'))
return
if (authz.check_config_permission('allow_dataset_collaborators')
and not authz.check_config_permission('allow_collaborators_to_change_owner_org')):
if package and user and not user.sysadmin:
is_collaborator = authz.user_is_collaborator_on_dataset(
user.id, package.id, ['admin', 'editor'])
if is_collaborator:
# User is a collaborator, check if it's also a member with
# edit rights of the current organization (redundant, but possible)
user_orgs = logic.get_action(
'organization_list_for_user')(
{'ignore_auth': True}, {'id': user.id, 'permission': 'update_dataset'})
user_is_org_member = package.owner_org in [org['id'] for org in user_orgs]
if data.get(key) != package.owner_org and not user_is_org_member:
raise Invalid(_('You cannot move this dataset to another organization'))
group = model.Group.get(value)
if not group:
raise Invalid(_('Organization does not exist'))
group_id = group.id
if not package or (package and package.owner_org != group_id):
# This is a new dataset or we are changing the organization
if not context.get(u'ignore_auth', False) and (not user or not(
user.sysadmin or authz.has_user_permission_for_group_or_org(
group_id, user.name, 'create_dataset'))):
raise Invalid(_('You cannot add a dataset to this organization'))
data[key] = group_id
|
30,070 | def _download_bundle_from_master(dcos_api_session, master_index, bundle):
""" Download DC/OS diagnostics bundle from a master
:param dcos_api_session: dcos_api_session fixture
:param master_index: master index from dcos_api_session.masters array
:param bundle: bundle name to download from master
"""
assert len(dcos_api_session.masters) >= master_index + 1, '{} masters required. Got {}'.format(
master_index + 1, len(dcos_api_session.masters))
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
expected_common_files = ['dmesg_-T.output.gz',
'ip_addr.output.gz',
'ip_route.output.gz',
'ps_aux_ww_Z.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1vips.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1records.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdefault.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdns.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsmesos_listener.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricslashup.output.gz',
'timedatectl.output.gz',
'binsh_-c_cat etc*-release.output.gz',
'systemctl_list-units_dcos*.output.gz',
'sestatus.output.gz',
'iptables-save.output.gz',
'ip6tables-save.output.gz',
'ipset_list.output.gz',
'opt/mesosphere/active.buildinfo.full.json.gz',
'opt/mesosphere/etc/dcos-version.json.gz',
'opt/mesosphere/etc/expanded.config.json.gz',
'opt/mesosphere/etc/user.config.yaml.gz',
'dcos-diagnostics-health.json',
'var/lib/dcos/cluster-id.gz',
'proc/cmdline.gz',
'proc/cpuinfo.gz',
'proc/meminfo.gz',
'proc/self/mountinfo.gz',
'optmesospherebindetect_ip.output.gz',
'sysctl_-a.output.gz',
]
# these files are expected to be in archive for a master host
expected_master_files = [
'binsh_-c_cat proc`systemctl show dcos-mesos-master.service -p MainPID| cut -d\'=\' -f2`environ.output.gz',
'5050-quota.json',
'5050-overlay-master_state.json.gz',
'dcos-mesos-master.service.gz',
'var/lib/dcos/exhibitor/zookeeper/snapshot/myid.gz',
'var/lib/dcos/exhibitor/conf/zoo.cfg.gz',
'var/lib/dcos/mesos/log/mesos-master.log.gz',
'var/lib/dcos/mesos/log/mesos-master.log.1.gz',
'var/lib/dcos/mesos/log/mesos-master.log.2.gz.gz',
'var/lib/dcos/mesos/log/mesos-master.log.3.gz.gz',
] + expected_common_files
expected_agent_common_files = [
'5051-containers.json',
'5051-containerizer_debug.json'
'5051-overlay-agent_overlay.json',
'var/log/mesos/mesos-agent.log.gz',
'docker_--version.output.gz',
'docker_ps.output.gz',
]
# for agent host
expected_agent_files = [
'dcos-mesos-slave.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
# for public agent host
expected_public_agent_files = [
'dcos-mesos-slave-public.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave-public.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
def _read_from_zip(z: zipfile.ZipFile, item: str, to_json=True):
# raises KeyError if item is not in zipfile.
item_content = z.read(item).decode()
if to_json:
# raises ValueError if cannot deserialize item_content.
return json.loads(item_content)
return item_content
def _get_dcos_diagnostics_health(z: zipfile.ZipFile, item: str):
# try to load dcos-diagnostics health report and validate the report is for this host
try:
_health_report = _read_from_zip(z, item)
except KeyError:
# we did not find a key in archive, let's take a look at items in archive and try to read
# diagnostics logs.
# namelist() gets a list of all items in a zip archive.
logging.info(z.namelist())
# summaryErrorsReport.txt and summaryReport.txt are diagnostic job log files.
for log in ('summaryErrorsReport.txt', 'summaryReport.txt'):
try:
log_data = _read_from_zip(z, log, to_json=False)
logging.info("{}:\n{}".format(log, log_data))
except KeyError:
logging.info("Could not read {}".format(log))
raise
except ValueError:
logging.info("Could not deserialize dcos-diagnostics-health")
raise
return _health_report
with tempfile.TemporaryDirectory() as tmp_dir:
bundle_full_location = os.path.join(tmp_dir, bundle)
with open(bundle_full_location, 'wb') as f:
r = dcos_api_session.health.get(os.path.join('/report/diagnostics/serve', bundle), stream=True,
node=dcos_api_session.masters[master_index])
for chunk in r.iter_content(1024):
f.write(chunk)
# validate bundle zip file.
assert zipfile.is_zipfile(bundle_full_location)
z = zipfile.ZipFile(bundle_full_location)
# get a list of all files in a zip archive.
archived_items = z.namelist()
# validate error log is empty
if 'summaryErrorsReport.txt' in archived_items:
log_data = _read_from_zip(z, 'summaryErrorsReport.txt', to_json=False)
raise AssertionError('summaryErrorsReport.txt must be empty. Got {}'.format(log_data))
# validate all files in zip archive are not empty
for item in archived_items:
assert z.getinfo(item).file_size, 'item {} is empty'.format(item)
# make sure all required log files for master node are in place.
for master_ip in dcos_api_session.masters:
master_folder = master_ip + '_master/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, master_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == master_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(master_folder + 'dcos-mesos-master.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(master_folder, archived_items, expected_master_files)
gzipped_state_output = z.open(master_folder + '5050-master_state.json.gz')
validate_state(gzipped_state_output)
# make sure all required log files for agent node are in place.
for slave_ip in dcos_api_session.slaves:
agent_folder = slave_ip + '_agent/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_folder + 'dcos-mesos-slave.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_folder, archived_items, expected_agent_files)
# make sure all required log files for public agent node are in place.
for public_slave_ip in dcos_api_session.public_slaves:
agent_public_folder = public_slave_ip + '_agent_public/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_public_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == public_slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_public_folder + 'dcos-mesos-slave-public.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_public_folder, archived_items, expected_public_agent_files)
| def _download_bundle_from_master(dcos_api_session, master_index, bundle):
""" Download DC/OS diagnostics bundle from a master
:param dcos_api_session: dcos_api_session fixture
:param master_index: master index from dcos_api_session.masters array
:param bundle: bundle name to download from master
"""
assert len(dcos_api_session.masters) >= master_index + 1, '{} masters required. Got {}'.format(
master_index + 1, len(dcos_api_session.masters))
health_url = dcos_api_session.default_url.copy(
query='cache=0',
path='system/health/v1',
)
diagnostics = Diagnostics(
default_url=health_url,
masters=dcos_api_session.masters,
all_slaves=dcos_api_session.all_slaves,
session=dcos_api_session.copy().session,
)
bundles = diagnostics.get_diagnostics_reports()
assert bundle in bundles, 'not found {} in {}'.format(bundle, bundles)
expected_common_files = ['dmesg_-T.output.gz',
'ip_addr.output.gz',
'ip_route.output.gz',
'ps_aux_ww_Z.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1vips.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1records.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdefault.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsdns.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricsmesos_listener.output.gz',
'optmesospherebincurl_-s_-S_http:localhost:62080v1metricslashup.output.gz',
'timedatectl.output.gz',
'binsh_-c_cat etc*-release.output.gz',
'systemctl_list-units_dcos*.output.gz',
'sestatus.output.gz',
'iptables-save.output.gz',
'ip6tables-save.output.gz',
'ipset_list.output.gz',
'opt/mesosphere/active.buildinfo.full.json.gz',
'opt/mesosphere/etc/dcos-version.json.gz',
'opt/mesosphere/etc/expanded.config.json.gz',
'opt/mesosphere/etc/user.config.yaml.gz',
'dcos-diagnostics-health.json',
'var/lib/dcos/cluster-id.gz',
'proc/cmdline.gz',
'proc/cpuinfo.gz',
'proc/meminfo.gz',
'proc/self/mountinfo.gz',
'optmesospherebindetect_ip.output.gz',
'sysctl_-a.output.gz',
]
# these files are expected to be in archive for a master host
expected_master_files = [
'binsh_-c_cat proc`systemctl show dcos-mesos-master.service -p MainPID| cut -d\'=\' -f2`environ.output.gz',
'5050-quota.json',
'5050-overlay-master_state.json.gz',
'dcos-mesos-master.service.gz',
'var/lib/dcos/exhibitor/zookeeper/snapshot/myid.gz',
'var/lib/dcos/exhibitor/conf/zoo.cfg.gz',
'var/lib/dcos/mesos/log/mesos-master.log.gz',
'var/lib/dcos/mesos/log/mesos-master.log.1.gz',
'var/lib/dcos/mesos/log/mesos-master.log.2.gz.gz',
'var/lib/dcos/mesos/log/mesos-master.log.3.gz.gz',
] + expected_common_files
expected_agent_common_files = [
'5051-containers.json',
'5051-containerizer_debug.json',
'5051-overlay-agent_overlay.json',
'var/log/mesos/mesos-agent.log.gz',
'docker_--version.output.gz',
'docker_ps.output.gz',
]
# for agent host
expected_agent_files = [
'dcos-mesos-slave.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
# for public agent host
expected_public_agent_files = [
'dcos-mesos-slave-public.service.gz',
'binsh_-c_cat proc`systemctl show dcos-mesos-slave-public.service -p MainPID| cut -d\'=\' -f2`environ.output.gz'
] + expected_agent_common_files + expected_common_files
def _read_from_zip(z: zipfile.ZipFile, item: str, to_json=True):
# raises KeyError if item is not in zipfile.
item_content = z.read(item).decode()
if to_json:
# raises ValueError if cannot deserialize item_content.
return json.loads(item_content)
return item_content
def _get_dcos_diagnostics_health(z: zipfile.ZipFile, item: str):
# try to load dcos-diagnostics health report and validate the report is for this host
try:
_health_report = _read_from_zip(z, item)
except KeyError:
# we did not find a key in archive, let's take a look at items in archive and try to read
# diagnostics logs.
# namelist() gets a list of all items in a zip archive.
logging.info(z.namelist())
# summaryErrorsReport.txt and summaryReport.txt are diagnostic job log files.
for log in ('summaryErrorsReport.txt', 'summaryReport.txt'):
try:
log_data = _read_from_zip(z, log, to_json=False)
logging.info("{}:\n{}".format(log, log_data))
except KeyError:
logging.info("Could not read {}".format(log))
raise
except ValueError:
logging.info("Could not deserialize dcos-diagnostics-health")
raise
return _health_report
with tempfile.TemporaryDirectory() as tmp_dir:
bundle_full_location = os.path.join(tmp_dir, bundle)
with open(bundle_full_location, 'wb') as f:
r = dcos_api_session.health.get(os.path.join('/report/diagnostics/serve', bundle), stream=True,
node=dcos_api_session.masters[master_index])
for chunk in r.iter_content(1024):
f.write(chunk)
# validate bundle zip file.
assert zipfile.is_zipfile(bundle_full_location)
z = zipfile.ZipFile(bundle_full_location)
# get a list of all files in a zip archive.
archived_items = z.namelist()
# validate error log is empty
if 'summaryErrorsReport.txt' in archived_items:
log_data = _read_from_zip(z, 'summaryErrorsReport.txt', to_json=False)
raise AssertionError('summaryErrorsReport.txt must be empty. Got {}'.format(log_data))
# validate all files in zip archive are not empty
for item in archived_items:
assert z.getinfo(item).file_size, 'item {} is empty'.format(item)
# make sure all required log files for master node are in place.
for master_ip in dcos_api_session.masters:
master_folder = master_ip + '_master/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, master_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == master_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(master_folder + 'dcos-mesos-master.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(master_folder, archived_items, expected_master_files)
gzipped_state_output = z.open(master_folder + '5050-master_state.json.gz')
validate_state(gzipped_state_output)
# make sure all required log files for agent node are in place.
for slave_ip in dcos_api_session.slaves:
agent_folder = slave_ip + '_agent/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_folder + 'dcos-mesos-slave.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_folder, archived_items, expected_agent_files)
# make sure all required log files for public agent node are in place.
for public_slave_ip in dcos_api_session.public_slaves:
agent_public_folder = public_slave_ip + '_agent_public/'
# try to load dcos-diagnostics health report and validate the report is for this host
health_report = _get_dcos_diagnostics_health(z, agent_public_folder + 'dcos-diagnostics-health.json')
assert 'ip' in health_report
assert health_report['ip'] == public_slave_ip
# make sure systemd unit output is correct and does not contain error message
gzipped_unit_output = z.open(agent_public_folder + 'dcos-mesos-slave-public.service.gz')
verify_unit_response(gzipped_unit_output, 100)
verify_archived_items(agent_public_folder, archived_items, expected_public_agent_files)
|
29,919 | def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_maxd=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test = True: Amount of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
map_maxd : float
maximum resolution [m] of spatial grid resolution
map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
cfg.PARAMS['dmax'] = map_maxd if map_maxd else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
| def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_maxd=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test = True: Amount of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
map_dmax : float
maximum resolution [m] of spatial grid resolution
map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
cfg.PARAMS['dmax'] = map_maxd if map_maxd else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
        # This is the complex one, just do the job and leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
|
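The level directories above are archived with OGGM's own helpers (utils.gdir_to_tar, utils.base_dir_to_tar), which are not shown here. As a rough, hypothetical sketch of what that per-level compression step amounts to (names and layout are illustrative, not the OGGM implementation):

import os
import shutil
import tarfile

def dir_to_tar(path, delete=False):
    # Compress a single glacier directory into <path>.tar.gz next to it.
    out = path.rstrip(os.sep) + '.tar.gz'
    with tarfile.open(out, 'w:gz') as tf:
        tf.add(path, arcname=os.path.basename(path))
    if delete:
        shutil.rmtree(path)
    return out

def level_dir_to_tar(l_base_dir):
    # Archive every per-glacier sub-directory of one level folder (e.g. L1/).
    for entry in sorted(os.listdir(l_base_dir)):
        full = os.path.join(l_base_dir, entry)
        if os.path.isdir(full):
            dir_to_tar(full, delete=False)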
58,779 | def _get_targets(target_str=None):
if target_str is None:
target_str = os.environ.get("TVM_TEST_TARGETS", "")
# Use dict instead of set for de-duplication so that the
# targets stay in the order specified.
target_names = list({t.strip(): None for t in target_str.split(";") if t.strip()})
if len(target_names) == 0:
target_names = DEFAULT_TEST_TARGETS
targets = []
for target in target_names:
target_kind = target.split()[0]
is_enabled = tvm.runtime.enabled(target_kind)
is_runnable = is_enabled and tvm.device(target_kind).exist
targets.append(
{
"target": target,
"target_kind": target_kind,
"is_enabled": is_enabled,
"is_runnable": is_runnable,
}
)
if all(not t["is_runnable"] for t in targets):
if tvm.runtime.enabled("llvm"):
logging.warning(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
target_str,
)
return _get_targets("llvm")
raise TVMError(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target."
" Cannot default to llvm, as it is not enabled." % target_str
)
return targets
| def _get_targets(target_str=None):
if target_str is None:
target_str = os.environ.get("TVM_TEST_TARGETS", "")
# Use dict instead of set for de-duplication so that the
# targets stay in the order specified.
target_names = list({t.strip(): None for t in target_str.split(";") if t.strip()})
if not target_names:
target_names = DEFAULT_TEST_TARGETS
targets = []
for target in target_names:
target_kind = target.split()[0]
is_enabled = tvm.runtime.enabled(target_kind)
is_runnable = is_enabled and tvm.device(target_kind).exist
targets.append(
{
"target": target,
"target_kind": target_kind,
"is_enabled": is_enabled,
"is_runnable": is_runnable,
}
)
if all(not t["is_runnable"] for t in targets):
if tvm.runtime.enabled("llvm"):
logging.warning(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target. Defaulting to llvm.",
target_str,
)
return _get_targets("llvm")
raise TVMError(
"None of the following targets are supported by this build of TVM: %s."
" Try setting TVM_TEST_TARGETS to a supported target."
" Cannot default to llvm, as it is not enabled." % target_str
)
return targets
|
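Besides the `len(...) == 0` to `not ...` change, the pair above leans on a small idiom worth spelling out: de-duplicating while preserving order by building a dict (dict keys keep insertion order in Python 3.7+). A minimal illustration, with dict.fromkeys as the usual shorthand:

target_str = "llvm; cuda; llvm ; vulkan"
via_comprehension = list({t.strip(): None for t in target_str.split(";") if t.strip()})
via_fromkeys = list(dict.fromkeys(t.strip() for t in target_str.split(";") if t.strip()))
assert via_comprehension == via_fromkeys == ["llvm", "cuda", "vulkan"]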
23,471 | def make_app_bundle(dist_dir, make_lite=False):
"""
Make macOS application bundle.
Parameters
----------
dist_dir : str
Directory in which to put the application bundle.
make_lite : bool, optional
Whether to create the application bundle with minimal packages.
The default is False.
NOTES
-----
py2app includes all packages in Spyder.app/Contents/Resources/lib/
python<ver>.zip, but some packages have issues when placed there.
The following packages are included in py2app's PACKAGES option so that
they will be placed in Spyder.app/Contents/Resources/lib/python<ver>
instead.
alabaster :
Error message: [Errno 20] Not a directory: '<path>/Resources/lib/
python38.zip/alabaster'
astroid :
ImportError: cannot import name 'context' from 'astroid'
(<path>/Resources/lib/python38.zip/astroid/__init__.pyc)
blib2to3 :
File "<frozen zipimport>", line 177, in get_data
KeyError: 'blib2to3/Users/rclary/Library/Caches/black/20.8b1/
Grammar3.8.6.final.0.pickle'
debugpy :
NotADirectoryError: [Errno 20] Not a directory:
'<path>/Resources/lib/python39.zip/debugpy/_vendored'
docutils :
[Errno 20] Not a directory: '<path>/Resources/lib/python39.zip/
docutils/writers/latex2e/docutils.sty'
IPython :
[IPKernelApp] WARNING | Could not copy README_STARTUP to startup dir.
Source file
<path>/Resources/lib/python38.zip/IPython/core/profile/README_STARTUP
does not exist
jedi :
jedi.api.environment.InvalidPythonEnvironment: Could not get version
information for '<path>/Contents/MacOS/python': InternalError("The
subprocess <path>/Contents/MacOS/python has crashed (EOFError('Ran out
of input'), stderr=).")
jinja2 :
No module named 'jinja2.ext'
keyring :
ModuleNotFoundError: No module named 'keyring.backends.<mod>'
pandas :
From Variable explorer: KeyError('pandas._libs.interval')
parso :
jedi.api.environment.InvalidPythonEnvironment: Could not get version
information for '/Users/rclary/opt/miniconda3/envs/c2w_37/bin/python':
InternalError("The subprocess /Users/rclary/opt/miniconda3/envs/c2w_37/
bin/python has crashed (EOFError('Ran out of input'), stderr=).")
PIL :
Library not loaded: @loader_path/.dylibs/libjpeg.9.dylib
Note: only applicable to not-Lite build
pygments :
ModuleNotFoundError: No module named 'pygments.formatters.latex'
pylint :
<path>/Contents/MacOS/python: No module named pylint.__main__
pylsp :
<path>/Contents/MacOS/python: No module named pylsp
Note: still occurs in alias mode
pylsp_black :
Mandatory: python-pyls-black >=1.0.0 : None (NOK)
pyls_spyder :
Mandatory: pyls_spyder >=0.1.1 : None (NOK)
qtawesome :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resourses/lib/
python38.zip/qtawesome/fonts/fontawesome4.7-webfont.ttf'
setuptools :
Mandatory: setuptools >=49.6.0 : None (NOK)
sphinx :
No module named 'sphinx.builders.changes'
spyder :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resources/lib/
python38.zip/spyder/app/mac_stylesheet.qss'
spyder_kernels :
No module named spyder_kernels.console.__main__
textdistance :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resources/lib/
python39.zip/textdistance/libraries.json'
"""
import shutil
import pkg_resources
from spyder import __version__ as SPYVER
from spyder.config.utils import EDIT_FILETYPES, _get_extensions
from spyder.config.base import MAC_APP_NAME
# Patch py2app for IPython help()
py2app_file = pkg_resources.pkgutil.get_loader('py2app').get_filename()
site_file = os.path.join(os.path.dirname(py2app_file), 'apptemplate',
'lib', 'site.py')
logger.info('Patching %s...', site_file)
with open(site_file, 'a+') as f:
f.seek(0)
content = f.read()
if 'builtins.help = _sitebuiltins._Helper()' not in content:
f.write('\nimport builtins'
'\nimport _sitebuiltins'
'\nbuiltins.help = _sitebuiltins._Helper()\n')
build_type = 'lite' if make_lite else 'full'
logger.info('Creating %s app bundle...', build_type)
PACKAGES = ['alabaster', 'astroid', 'blib2to3', 'docutils', 'IPython',
'jedi', 'jinja2', 'keyring', 'parso', 'pygments', 'pylint',
'pylsp', 'pylsp_black', 'pyls_spyder', 'qtawesome',
'setuptools', 'sphinx', 'spyder', 'spyder_kernels',
'textdistance',
'debugpy',
]
INCLUDES = ['_sitebuiltins', # required for IPython help()
# required for sphinx
'sphinxcontrib.applehelp', 'sphinxcontrib.devhelp',
'sphinxcontrib.htmlhelp', 'sphinxcontrib.jsmath',
'sphinxcontrib.qthelp', 'sphinxcontrib.serializinghtml',
'platformdirs.macos', # required for platformdirs
]
EXCLUDES = []
EXCLUDE_EGG = ['py2app']
if make_lite:
EXCLUDES.extend([
'numpy', 'scipy', 'pandas', 'matplotlib', 'cython', 'sympy', 'PIL'
])
EXCLUDE_EGG.extend(['pillow'])
else:
INCLUDES.extend([
'numpy', 'scipy', 'pandas', 'matplotlib', 'cython', 'sympy'
])
PACKAGES.extend(['pandas', 'PIL'])
EXCLUDE_EGG.extend(EXCLUDES)
EDIT_EXT = [ext[1:] for ext in _get_extensions(EDIT_FILETYPES)]
# Get rtree dylibs
rtree_loc = pkg_resources.get_distribution('rtree').module_path
rtree_dylibs = os.scandir(os.path.join(rtree_loc, 'rtree', 'lib'))
FRAMEWORKS = [lib.path for lib in rtree_dylibs]
OPTIONS = {
'optimize': 0,
'packages': PACKAGES,
'includes': INCLUDES,
'excludes': EXCLUDES,
'iconfile': ICONFILE,
'dist_dir': dist_dir,
'frameworks': FRAMEWORKS,
'plist': {
'CFBundleDocumentTypes': [{'CFBundleTypeExtensions': EDIT_EXT,
'CFBundleTypeName': 'Text File',
'CFBundleTypeRole': 'Editor'}],
'CFBundleIdentifier': 'org.spyder-ide',
'CFBundleShortVersionString': SPYVER,
'NSRequiresAquaSystemAppearance': False # Darkmode support
}
}
# Copy main application script
app_script_name = MAC_APP_NAME.replace('.app', '.py')
app_script_path = os.path.join(SPYREPO, 'scripts', app_script_name)
shutil.copy2(os.path.join(SPYREPO, 'scripts', 'spyder'), app_script_path)
# Build the application
try:
os.symlink(os.path.join(SPYREPO, 'spyder'), SPYLINK)
setup(app=[app_script_path], options={'py2app': OPTIONS})
finally:
os.remove(app_script_path)
os.remove(SPYLINK)
# Copy egg info from site-packages: fixes several pkg_resources issues
dest_dir = os.path.join(dist_dir, MAC_APP_NAME, 'Contents', 'Resources',
'lib', f'python{PYVER[0]}.{PYVER[1]}')
pkg_resources.working_set.add_entry(SPYREPO)
for dist in pkg_resources.working_set:
if (dist.egg_info is None or dist.key.startswith('pyobjc')
or dist.key in EXCLUDE_EGG):
logger.info(f'Skipping egg {dist.key}')
continue
egg = os.path.basename(dist.egg_info)
dest = os.path.join(dest_dir, egg)
shutil.copytree(dist.egg_info, dest)
logger.info(f'Copied {egg}')
logger.info('App bundle complete.')
return
| def make_app_bundle(dist_dir, make_lite=False):
"""
Make macOS application bundle.
Parameters
----------
dist_dir : str
Directory in which to put the application bundle.
make_lite : bool, optional
Whether to create the application bundle with minimal packages.
The default is False.
NOTES
-----
py2app includes all packages in Spyder.app/Contents/Resources/lib/
python<ver>.zip, but some packages have issues when placed there.
The following packages are included in py2app's PACKAGES option so that
they will be placed in Spyder.app/Contents/Resources/lib/python<ver>
instead.
alabaster :
Error message: [Errno 20] Not a directory: '<path>/Resources/lib/
python38.zip/alabaster'
astroid :
ImportError: cannot import name 'context' from 'astroid'
(<path>/Resources/lib/python38.zip/astroid/__init__.pyc)
blib2to3 :
File "<frozen zipimport>", line 177, in get_data
KeyError: 'blib2to3/Users/rclary/Library/Caches/black/20.8b1/
Grammar3.8.6.final.0.pickle'
debugpy :
NotADirectoryError: [Errno 20] Not a directory:
'<path>/Resources/lib/python39.zip/debugpy/_vendored'
docutils :
[Errno 20] Not a directory: '<path>/Resources/lib/python39.zip/
docutils/writers/latex2e/docutils.sty'
IPython :
[IPKernelApp] WARNING | Could not copy README_STARTUP to startup dir.
Source file
<path>/Resources/lib/python38.zip/IPython/core/profile/README_STARTUP
does not exist
jedi :
jedi.api.environment.InvalidPythonEnvironment: Could not get version
information for '<path>/Contents/MacOS/python': InternalError("The
subprocess <path>/Contents/MacOS/python has crashed (EOFError('Ran out
of input'), stderr=).")
jinja2 :
No module named 'jinja2.ext'
keyring :
ModuleNotFoundError: No module named 'keyring.backends.<mod>'
pandas :
From Variable explorer: KeyError('pandas._libs.interval')
parso :
jedi.api.environment.InvalidPythonEnvironment: Could not get version
information for '/Users/rclary/opt/miniconda3/envs/c2w_37/bin/python':
InternalError("The subprocess /Users/rclary/opt/miniconda3/envs/c2w_37/
bin/python has crashed (EOFError('Ran out of input'), stderr=).")
PIL :
Library not loaded: @loader_path/.dylibs/libjpeg.9.dylib
Note: only applicable to not-Lite build
pygments :
ModuleNotFoundError: No module named 'pygments.formatters.latex'
pylint :
<path>/Contents/MacOS/python: No module named pylint.__main__
pylsp :
<path>/Contents/MacOS/python: No module named pylsp
Note: still occurs in alias mode
pylsp_black :
Mandatory: python-pyls-black >=1.0.0 : None (NOK)
pyls_spyder :
Mandatory: pyls_spyder >=0.1.1 : None (NOK)
qtawesome :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resourses/lib/
python38.zip/qtawesome/fonts/fontawesome4.7-webfont.ttf'
setuptools :
Mandatory: setuptools >=49.6.0 : None (NOK)
sphinx :
No module named 'sphinx.builders.changes'
spyder :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resources/lib/
python38.zip/spyder/app/mac_stylesheet.qss'
spyder_kernels :
No module named spyder_kernels.console.__main__
textdistance :
NotADirectoryError: [Errno 20] Not a directory: '<path>/Resources/lib/
python39.zip/textdistance/libraries.json'
"""
import shutil
import pkg_resources
from spyder import __version__ as SPYVER
from spyder.config.utils import EDIT_FILETYPES, _get_extensions
from spyder.config.base import MAC_APP_NAME
# Patch py2app for IPython help()
py2app_file = pkg_resources.pkgutil.get_loader('py2app').get_filename()
site_file = os.path.join(os.path.dirname(py2app_file), 'apptemplate',
'lib', 'site.py')
logger.info('Patching %s...', site_file)
with open(site_file, 'a+') as f:
f.seek(0)
content = f.read()
if 'builtins.help = _sitebuiltins._Helper()' not in content:
f.write('\nimport builtins'
'\nimport _sitebuiltins'
'\nbuiltins.help = _sitebuiltins._Helper()\n')
build_type = 'lite' if make_lite else 'full'
logger.info('Creating %s app bundle...', build_type)
PACKAGES = ['alabaster', 'astroid', 'blib2to3', 'docutils', 'IPython',
'jedi', 'jinja2', 'keyring', 'parso', 'pygments', 'pylint',
'pylsp', 'pylsp_black', 'pyls_spyder', 'qtawesome',
'setuptools', 'sphinx', 'spyder', 'spyder_kernels',
'textdistance', 'debugpy',
]
INCLUDES = ['_sitebuiltins', # required for IPython help()
# required for sphinx
'sphinxcontrib.applehelp', 'sphinxcontrib.devhelp',
'sphinxcontrib.htmlhelp', 'sphinxcontrib.jsmath',
'sphinxcontrib.qthelp', 'sphinxcontrib.serializinghtml',
'platformdirs.macos', # required for platformdirs
]
EXCLUDES = []
EXCLUDE_EGG = ['py2app']
if make_lite:
EXCLUDES.extend([
'numpy', 'scipy', 'pandas', 'matplotlib', 'cython', 'sympy', 'PIL'
])
EXCLUDE_EGG.extend(['pillow'])
else:
INCLUDES.extend([
'numpy', 'scipy', 'pandas', 'matplotlib', 'cython', 'sympy'
])
PACKAGES.extend(['pandas', 'PIL'])
EXCLUDE_EGG.extend(EXCLUDES)
EDIT_EXT = [ext[1:] for ext in _get_extensions(EDIT_FILETYPES)]
# Get rtree dylibs
rtree_loc = pkg_resources.get_distribution('rtree').module_path
rtree_dylibs = os.scandir(os.path.join(rtree_loc, 'rtree', 'lib'))
FRAMEWORKS = [lib.path for lib in rtree_dylibs]
OPTIONS = {
'optimize': 0,
'packages': PACKAGES,
'includes': INCLUDES,
'excludes': EXCLUDES,
'iconfile': ICONFILE,
'dist_dir': dist_dir,
'frameworks': FRAMEWORKS,
'plist': {
'CFBundleDocumentTypes': [{'CFBundleTypeExtensions': EDIT_EXT,
'CFBundleTypeName': 'Text File',
'CFBundleTypeRole': 'Editor'}],
'CFBundleIdentifier': 'org.spyder-ide',
'CFBundleShortVersionString': SPYVER,
'NSRequiresAquaSystemAppearance': False # Darkmode support
}
}
# Copy main application script
app_script_name = MAC_APP_NAME.replace('.app', '.py')
app_script_path = os.path.join(SPYREPO, 'scripts', app_script_name)
shutil.copy2(os.path.join(SPYREPO, 'scripts', 'spyder'), app_script_path)
# Build the application
try:
os.symlink(os.path.join(SPYREPO, 'spyder'), SPYLINK)
setup(app=[app_script_path], options={'py2app': OPTIONS})
finally:
os.remove(app_script_path)
os.remove(SPYLINK)
# Copy egg info from site-packages: fixes several pkg_resources issues
dest_dir = os.path.join(dist_dir, MAC_APP_NAME, 'Contents', 'Resources',
'lib', f'python{PYVER[0]}.{PYVER[1]}')
pkg_resources.working_set.add_entry(SPYREPO)
for dist in pkg_resources.working_set:
if (dist.egg_info is None or dist.key.startswith('pyobjc')
or dist.key in EXCLUDE_EGG):
logger.info(f'Skipping egg {dist.key}')
continue
egg = os.path.basename(dist.egg_info)
dest = os.path.join(dest_dir, egg)
shutil.copytree(dist.egg_info, dest)
logger.info(f'Copied {egg}')
logger.info('App bundle complete.')
return
|
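One reusable piece in the function above is the idempotent patch of py2app's site.py: open in 'a+' mode, read the whole file, and append the snippet only when its marker line is missing, so repeated builds do not stack duplicate patches. A minimal sketch of that pattern (the file name and snippet here are illustrative):

def append_once(path, snippet, marker):
    # Append `snippet` to `path` only if `marker` is not already present.
    with open(path, 'a+') as f:
        f.seek(0)
        if marker not in f.read():
            f.write(snippet)

append_once('site.py',
            '\nimport builtins'
            '\nimport _sitebuiltins'
            '\nbuiltins.help = _sitebuiltins._Helper()\n',
            'builtins.help = _sitebuiltins._Helper()')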
6,921 | def missing_in_backup(doctypes, file):
"""Checks if the list of doctypes exist in the database.sql.gz file supplied
Args:
doctypes (list): List of DocTypes to be checked
file (str): Path of the database file
Returns:
doctypes(list): doctypes that are missing in backup
"""
predicate = (
'COPY public."tab{}"'
if frappe.conf.db_type == "postgres"
else "CREATE TABLE `tab{}`"
)
with gzip.open(file, "rb") as f:
content = f.read().decode("utf8").lower()
return [doctype for doctype in doctypes
if predicate.format(doctype).lower() not in content]
| def missing_in_backup(doctypes, file):
"""Returns list of missing doctypes in the backup.
Args:
doctypes (list): List of DocTypes to be checked
file (str): Path of the database file
Returns:
doctypes(list): doctypes that are missing in backup
"""
predicate = (
'COPY public."tab{}"'
if frappe.conf.db_type == "postgres"
else "CREATE TABLE `tab{}`"
)
with gzip.open(file, "rb") as f:
content = f.read().decode("utf8").lower()
return [doctype for doctype in doctypes
if predicate.format(doctype).lower() not in content]
|
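A small end-to-end usage sketch of the check above, using a synthetic MariaDB-style dump (the frappe.conf lookup merely switches the predicate between PostgreSQL's COPY statement and MariaDB's CREATE TABLE statement):

import gzip

with gzip.open('database.sql.gz', 'wb') as f:
    f.write(b'CREATE TABLE `tabUser` (...);\nCREATE TABLE `tabToDo` (...);\n')

doctypes = ['User', 'ToDo', 'Note']
predicate = 'CREATE TABLE `tab{}`'
with gzip.open('database.sql.gz', 'rb') as f:
    content = f.read().decode('utf8').lower()
assert [d for d in doctypes if predicate.format(d).lower() not in content] == ['Note']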
30,155 | def fetch_consumption_df(zone_key='JP-TK', target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Returns the consumption for an area as a pandas DataFrame
For JP-CB the consumption file includes solar production
"""
datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')
consumption_url = {
'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),
'JP-TH': 'https://setsuden.nw.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),
'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-d-j.csv',
'JP-HR': 'http://www.rikuden.co.jp/nw/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),
'JP-CB': 'https://powergrid.chuden.co.jp/denki_yoho_content_data/juyo_cepco003.csv',
'JP-KN': 'https://www.kansai-td.co.jp/yamasou/juyo1_kansai.csv',
'JP-CG': 'https://www.energia.co.jp/nw/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),
'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv',
'JP-KY': 'https://www.kyuden.co.jp/td_power_usages/csv/juyo-hourly-{}.csv'.format(datestamp),
'JP-ON': 'https://www.okiden.co.jp/denki2/juyo_10_{}.csv'.format(datestamp)
}
# First roughly 40 rows of the consumption files have hourly data,
# the parser skips to the rows with 5-min actual values
if zone_key == 'JP-KN':
startrow = 57
else:
startrow = 54
try:
df = pd.read_csv(consumption_url[zone_key], skiprows=startrow,
encoding='shift-jis')
except pd.errors.EmptyDataError as e:
logger.error("Data not available yet")
raise e
if zone_key in ['JP-TH']:
df.columns = ['Date', 'Time', 'cons', 'solar', 'wind']
else:
df.columns = ['Date', 'Time', 'cons', 'solar']
# Convert 万kW to MW
df['cons'] = 10*df['cons']
if 'solar' in df.columns:
df['solar'] = 10*df['solar']
df = df.dropna()
df['datetime'] = df.apply(parse_dt, axis=1)
if 'solar' in df.columns:
df = df[['datetime', 'cons','solar']]
else:
df = df[['datetime', 'cons']]
return df
| def fetch_consumption_df(zone_key='JP-TK', target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Returns the consumption for an area as a pandas DataFrame
For JP-CB the consumption file includes solar production
"""
datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')
consumption_url = {
'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),
'JP-TH': 'https://setsuden.nw.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),
'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-d-j.csv',
'JP-HR': 'http://www.rikuden.co.jp/nw/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),
'JP-CB': 'https://powergrid.chuden.co.jp/denki_yoho_content_data/juyo_cepco003.csv',
'JP-KN': 'https://www.kansai-td.co.jp/yamasou/juyo1_kansai.csv',
'JP-CG': 'https://www.energia.co.jp/nw/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),
'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv',
'JP-KY': 'https://www.kyuden.co.jp/td_power_usages/csv/juyo-hourly-{}.csv'.format(datestamp),
'JP-ON': 'https://www.okiden.co.jp/denki2/juyo_10_{}.csv'.format(datestamp)
}
# First roughly 40 rows of the consumption files have hourly data,
# the parser skips to the rows with 5-min actual values
if zone_key == 'JP-KN':
startrow = 57
else:
startrow = 54
try:
df = pd.read_csv(consumption_url[zone_key], skiprows=startrow,
encoding='shift-jis')
except pd.errors.EmptyDataError as e:
logger.exception("Data not available yet")
raise e
if zone_key in ['JP-TH']:
df.columns = ['Date', 'Time', 'cons', 'solar', 'wind']
else:
df.columns = ['Date', 'Time', 'cons', 'solar']
# Convert 万kW to MW
df['cons'] = 10*df['cons']
if 'solar' in df.columns:
df['solar'] = 10*df['solar']
df = df.dropna()
df['datetime'] = df.apply(parse_dt, axis=1)
if 'solar' in df.columns:
df = df[['datetime', 'cons','solar']]
else:
df = df[['datetime', 'cons']]
return df
|
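The factor of 10 in the pair above comes from the publishing unit: the utilities report demand in 万kW (10,000 kW), and 10,000 kW is 10 MW. A tiny sketch of just that conversion on a made-up row:

import pandas as pd

df = pd.DataFrame({'Date': ['2021/4/1'], 'Time': ['13:00'], 'cons': [3543], 'solar': [820]})
df['cons'] = 10 * df['cons']     # 万kW -> MW
df['solar'] = 10 * df['solar']   # 万kW -> MW
assert int(df.loc[0, 'cons']) == 35430 and int(df.loc[0, 'solar']) == 8200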
50,325 | def test_list_project_forks(gl, resp_list_forks):
forks = gl.projects.get(1).forks.list()
assert isinstance(forks[0], ProjectFork)
assert forks[0].id == 1
assert forks[1].id == "python-gitlab%2Fpython-gitlab"
| def test_list_project_forks(project, resp_list_forks):
forks = project.forks.list()
assert isinstance(forks[0], ProjectFork)
assert forks[0].id == 1
assert forks[1].id == "python-gitlab%2Fpython-gitlab"
|
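The right-hand test assumes a `project` fixture instead of calling `gl.projects.get(1)` inside the test body, which keeps the lookup in one place and lets other tests reuse it. A plausible fixture along those lines (the name and the wrapped `gl` fixture are assumptions, not taken from python-gitlab's actual test suite):

import pytest

@pytest.fixture
def project(gl):
    # Resolve the project once so individual tests only deal with the object.
    return gl.projects.get(1)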
441 | def generate_from_form_export_instance(export_instance, output_file):
assert isinstance(export_instance, FormExportInstance)
if not export_instance.selected_tables:
raise DETConfigError(_(f'No Tables found in Export {export_instance.name}'))
main_input_table = export_instance.selected_tables[0]
main_output_table = DETTable(
name=main_input_table.label,
source='form',
filter_name='xmlns',
filter_value=export_instance.xmlns,
rows=[],
)
output = DETConfig(name=export_instance.name, tables=[main_output_table])
_add_rows_for_table(main_input_table, main_output_table)
for additional_input_table in export_instance.selected_tables[1:]:
additional_output_table = DETTable(
name=additional_input_table.label,
source=f'form.{additional_input_table.readable_path}[*]',
filter_name='xmlns',
filter_value=export_instance.xmlns,
rows=[],
)
# note: this has to be defined here because it relies on closures
def _strip_repeat_path(input_path):
return input_path.replace(f'{additional_input_table.readable_path}.', '')
_add_rows_for_table(additional_input_table, additional_output_table,
path_transform_fn=_strip_repeat_path)
output.tables.append(additional_output_table)
output.export_to_file(output_file)
| def generate_from_form_export_instance(export_instance, output_file):
assert isinstance(export_instance, FormExportInstance)
if not export_instance.selected_tables:
raise DETConfigError(_('No Tables found in Export {name}').format(name=export_instance.name))
main_input_table = export_instance.selected_tables[0]
main_output_table = DETTable(
name=main_input_table.label,
source='form',
filter_name='xmlns',
filter_value=export_instance.xmlns,
rows=[],
)
output = DETConfig(name=export_instance.name, tables=[main_output_table])
_add_rows_for_table(main_input_table, main_output_table)
for additional_input_table in export_instance.selected_tables[1:]:
additional_output_table = DETTable(
name=additional_input_table.label,
source=f'form.{additional_input_table.readable_path}[*]',
filter_name='xmlns',
filter_value=export_instance.xmlns,
rows=[],
)
# note: this has to be defined here because it relies on closures
def _strip_repeat_path(input_path):
return input_path.replace(f'{additional_input_table.readable_path}.', '')
_add_rows_for_table(additional_input_table, additional_output_table,
path_transform_fn=_strip_repeat_path)
output.tables.append(additional_output_table)
output.export_to_file(output_file)
|
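The only change in this pair is dropping the f-string inside `_()`: an f-string is interpolated before gettext ever sees it, so the resulting literal can never match a message catalog entry. Translating the template first and formatting afterwards keeps the msgid stable. A minimal sketch using the stdlib gettext, which simply returns the string when no catalog is installed:

from gettext import gettext as _

name = 'Monthly Forms'
broken = _(f'No Tables found in Export {name}')              # msgid differs per export
stable = _('No Tables found in Export {name}').format(name=name)
assert broken == stable == 'No Tables found in Export Monthly Forms'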
20,558 | def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# See if there's a configuration file and import those options
if arguments.config is not None:
print('configuring')
with open(arguments.config, 'r') as conf:
_, ext = os.path.splitext(arguments.config)
if ext == '.json':
config = json.load(conf)
if ext == '.yml' or ext == '.yaml':
config = yaml.load(conf, Loader=yaml.Loader)
# Warn people if they're overriding their config file
if len(argv) > 2:
warnings.warn(UserWarning('Using the `-config|-c` flag with additional arguments is discouraged'))
# Check for unsupported arguments
orig_keys = set(vars(arguments).keys())
config_keys = set(config.keys())
if orig_keys != config_keys:
for k in config_keys.difference(orig_keys):
del config[k] # Remove the unknown key
warnings.warn(UserWarning(
'Unknown key "{}" found in your configuration file, ignoring.'.format(k)))
# Update the default to match the config
parser.set_defaults(**config)
# Reparse the arguments
arguments = parser.parse_args(argv)
if arguments.script is None:
parser.error("The -script argument must be provided, either via command-line or via the -config/-c argument.")
# Set up email notifications if desired
do_email = arguments.email_to is not None
if do_email:
email_to = arguments.email_to
if arguments.email_from is not None:
email_from = arguments.email_from
else:
email_from = arguments.email_to
smtp_host, smtp_port = arguments.email_host.split(":")
smtp_port = int(smtp_port)
email_pass = getpass('Please input your email password:\n')
def send_notification(subject, message):
send_email(email_to, email_from,
subject=subject,
message=message,
passwd=email_pass,
smtp_host=smtp_host,
smtp_port=smtp_port)
while True:
send_test = input('Would you like to send a test email to validate your settings? [Y/n]:\n')
if send_test.lower() in ['', 'y', 'n']:
break
else:
print('Please input y or n')
if send_test.lower() in ['', 'y']:
send_notification('sct_run_batch: test notification', 'Looks good')
# Set up output directories and create them if they don't already exist
path_output = os.path.abspath(os.path.expanduser(arguments.path_output))
path_results = os.path.join(path_output, 'results')
path_data_processed = os.path.join(path_output, 'data_processed')
path_log = os.path.join(path_output, 'log')
path_qc = os.path.join(path_output, 'qc')
path_segmanual = os.path.abspath(os.path.expanduser(arguments.path_segmanual))
script = os.path.abspath(os.path.expanduser(arguments.script))
path_data = os.path.abspath(os.path.expanduser(arguments.path_data))
for pth in [path_output, path_results, path_data_processed, path_log, path_qc]:
os.makedirs(pth, exist_ok=True)
# Check that the script can be found
if not os.path.exists(script):
        raise FileNotFoundError('Couldn\'t find the script at {}'.format(script))
# Setup overall log
batch_log = open(os.path.join(path_log, arguments.batch_log), 'w')
# Duplicate init_sct message to batch_log
print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__), file=batch_log, flush=True)
# Tee IO to batch_log and std(out/err)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = Tee(batch_log, orig_stdout)
sys.stderr = Tee(batch_log, orig_stderr)
def reset_streams():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# Display OS
print("INFO SYSTEM")
print("-----------")
platform_running = sys.platform
if platform_running.startswith('darwin'):
os_running = 'osx'
elif platform_running.startswith('linux'):
os_running = 'linux'
elif platform_running.startswith('win32'):
os_running = 'windows'
else:
os_running = platform_running
print('OS: ' + os_running + ' (' + platform.platform() + ')')
# Display number of CPU cores
print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format(multiprocessing.cpu_count(), arguments.itk_threads))
# Display RAM available
print("RAM: Total {} MB | Available {} MB | Used {} MB".format(
int(psutil.virtual_memory().total / 1024 / 1024),
int(psutil.virtual_memory().available / 1024 / 1024),
int(psutil.virtual_memory().used / 1024 / 1024),
))
# Log the current arguments (in yaml because it's cleaner)
print('\nINPUT ARGUMENTS')
print("---------------")
print(yaml.dump(vars(arguments)))
# Display script version info
print("SCRIPT")
print("------")
print("git commit: {}".format(__get_commit(path_to_git_folder=os.path.dirname(script))))
print("git origin: {}".format(__get_git_origin(path_to_git_folder=os.path.dirname(script))))
print("Copying script to output folder...")
if os.path.isdir(script):
print("Input folder is a directory (not a file). Skipping copy.")
else:
try:
# Copy the script and record the new location
script_copy = os.path.abspath(shutil.copy(script, arguments.path_output))
print("{} -> {}".format(script, script_copy))
script = script_copy
except shutil.SameFileError:
print("Input and output folder are the same. Skipping copy.")
pass
print("Setting execute permissions for script file {} ...".format(arguments.script))
script_stat = os.stat(script)
os.chmod(script, script_stat.st_mode | S_IEXEC)
# Display data version info
print("\nDATA")
print("----")
print("git commit: {}".format(__get_commit(path_to_git_folder=path_data)))
print("git origin: {}\n".format(__get_git_origin(path_to_git_folder=path_data)))
# Find subjects and process inclusion/exclusions
subject_dirs = []
subject_flat_dirs = [f for f in os.listdir(path_data) if f.startswith(arguments.subject_prefix)]
for isub in subject_flat_dirs:
# Only consider folders
if os.path.isdir(os.path.join(path_data, isub)):
session_dirs = [f for f in os.listdir(os.path.join(path_data, isub)) if f.startswith('ses-')]
if not session_dirs:
# There is no session folder, so we consider only sub- directory: sub-XX
subject_dirs.append(isub)
else:
# There is a session folder, so we concatenate: sub-XX/ses-YY
session_dirs.sort()
for isess in session_dirs:
subject_dirs.append(os.path.join(isub, isess))
# Handle inclusion lists
assert not ((arguments.include is not None) and (arguments.include_list is not None)),\
'Only one of `include` and `include-list` can be used'
if arguments.include is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.include, f) is not None]
if arguments.include_list is not None:
# TODO decide if we should warn users if one of their inclusions isn't around
subject_dirs = [f for f in subject_dirs if f in arguments.include_list]
# Handle exclusions
assert not ((arguments.exclude is not None) and (arguments.exclude_list is not None)),\
'Only one of `exclude` and `exclude-list` can be used'
if arguments.exclude is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.exclude, f) is None]
if arguments.exclude_list is not None:
subject_dirs = [f for f in subject_dirs if f not in arguments.exclude_list]
# Determine the number of jobs we can run simultaneously
if arguments.jobs < 1:
jobs = multiprocessing.cpu_count() + arguments.jobs
else:
jobs = arguments.jobs
print("RUNNING")
print("-------")
print("Processing {} subjects in parallel. (Worker processes used: {}).".format(len(subject_dirs), jobs))
# Run the jobs, recording start and end times
start = datetime.datetime.now()
# Trap errors to send an email if a script fails.
try:
with multiprocessing.Pool(jobs) as p:
run_single_dir = functools.partial(run_single,
script=script,
script_args=arguments.script_args,
path_segmanual=path_segmanual,
path_data=path_data,
path_data_processed=path_data_processed,
path_results=path_results,
path_log=path_log,
path_qc=path_qc,
itk_threads=arguments.itk_threads,
continue_on_error=arguments.continue_on_error)
results = list(p.imap(run_single_dir, subject_dirs))
except Exception as e:
if do_email:
message = ('Oh no there has been the following error in your pipeline:\n\n'
'{}'.format(e))
try:
# I consider the multiprocessing error more significant than a potential email error, this
# ensures that the multiprocessing error is signalled.
send_notification('sct_run_batch errored', message)
except Exception:
raise e
raise e
else:
raise e
end = datetime.datetime.now()
# Check for failed subjects
fails = [sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0]
if len(fails) == 0:
status_message = '\nHooray! your batch completed successfully :-)\n'
else:
status_message = ('\nYour batch completed but some subjects may have not completed '
'successfully, please consult the logs for:\n'
'{}\n'.format('\n'.join(fails)))
print(status_message)
# Display timing
duration = end - start
timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format(
start.strftime('%Hh%Mm%Ss'),
end.strftime('%Hh%Mm%Ss'),
(datetime.datetime.utcfromtimestamp(0) + duration).strftime('%Hh%Mm%Ss')))
print(timing_message)
if do_email:
send_notification('sct_run_batch: Run completed',
status_message + timing_message)
display_open(file=os.path.join(path_qc, "index.html"),
message="To open the Quality Control (QC) report on a web-browser")
if arguments.zip:
file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S'))
path_tmp = os.path.join(tempfile.mkdtemp(), file_zip)
os.makedirs(os.path.join(path_tmp, file_zip))
for folder in [path_log, path_qc, path_results]:
shutil.copytree(folder, os.path.join(path_tmp, file_zip, os.path.split(folder)[-1]))
shutil.make_archive(os.path.join(path_output, file_zip), 'zip', path_tmp)
shutil.rmtree(path_tmp)
print("\nOutput zip archive: {}.zip".format(os.path.join(path_output, file_zip)))
reset_streams()
batch_log.close()
| def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# See if there's a configuration file and import those options
if arguments.config is not None:
print('configuring')
with open(arguments.config, 'r') as conf:
_, ext = os.path.splitext(arguments.config)
if ext == '.json':
config = json.load(conf)
if ext == '.yml' or ext == '.yaml':
config = yaml.load(conf, Loader=yaml.Loader)
# Warn people if they're overriding their config file
if len(argv) > 2:
warnings.warn(UserWarning('Using the `-config|-c` flag with additional arguments is discouraged'))
# Check for unsupported arguments
orig_keys = set(vars(arguments).keys())
config_keys = set(config.keys())
if orig_keys != config_keys:
for k in config_keys.difference(orig_keys):
del config[k] # Remove the unknown key
warnings.warn(UserWarning(
'Unknown key "{}" found in your configuration file, ignoring.'.format(k)))
# Update the default to match the config
parser.set_defaults(**config)
# Reparse the arguments
arguments = parser.parse_args(argv)
if arguments.script is None:
parser.error("The -script argument must be provided, either via command-line or via the -config/-c argument.")
# Set up email notifications if desired
do_email = arguments.email_to is not None
if do_email:
email_to = arguments.email_to
if arguments.email_from is not None:
email_from = arguments.email_from
else:
email_from = arguments.email_to
smtp_host, smtp_port = arguments.email_host.split(":")
smtp_port = int(smtp_port)
email_pass = getpass('Please input your email password:\n')
def send_notification(subject, message):
send_email(email_to, email_from,
subject=subject,
message=message,
passwd=email_pass,
smtp_host=smtp_host,
smtp_port=smtp_port)
while True:
send_test = input('Would you like to send a test email to validate your settings? [Y/n]:\n')
if send_test.lower() in ['', 'y', 'n']:
break
else:
print('Please input y or n')
if send_test.lower() in ['', 'y']:
send_notification('sct_run_batch: test notification', 'Looks good')
# Set up output directories and create them if they don't already exist
path_output = os.path.abspath(os.path.expanduser(arguments.path_output))
path_results = os.path.join(path_output, 'results')
path_data_processed = os.path.join(path_output, 'data_processed')
path_log = os.path.join(path_output, 'log')
path_qc = os.path.join(path_output, 'qc')
path_segmanual = os.path.abspath(os.path.expanduser(arguments.path_segmanual))
script = os.path.abspath(os.path.expanduser(arguments.script))
path_data = os.path.abspath(os.path.expanduser(arguments.path_data))
for pth in [path_output, path_results, path_data_processed, path_log, path_qc]:
os.makedirs(pth, exist_ok=True)
# Check that the script can be found
if not os.path.exists(script):
        raise FileNotFoundError('Couldn\'t find the script at {}'.format(script))
# Setup overall log
batch_log = open(os.path.join(path_log, arguments.batch_log), 'w')
# Duplicate init_sct message to batch_log
print('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__), file=batch_log, flush=True)
# Tee IO to batch_log and std(out/err)
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = Tee(batch_log, orig_stdout)
sys.stderr = Tee(batch_log, orig_stderr)
def reset_streams():
sys.stdout = orig_stdout
sys.stderr = orig_stderr
# Display OS
print("INFO SYSTEM")
print("-----------")
platform_running = sys.platform
if platform_running.startswith('darwin'):
os_running = 'osx'
elif platform_running.startswith('linux'):
os_running = 'linux'
elif platform_running.startswith('win32'):
os_running = 'windows'
else:
os_running = platform_running
print('OS: ' + os_running + ' (' + platform.platform() + ')')
# Display number of CPU cores
print('CPU cores: Available: {} | Threads used by ITK Programs: {}'.format(multiprocessing.cpu_count(), arguments.itk_threads))
# Display RAM available
print("RAM: Total {} MB | Available {} MB | Used {} MB".format(
int(psutil.virtual_memory().total / 1024 / 1024),
int(psutil.virtual_memory().available / 1024 / 1024),
int(psutil.virtual_memory().used / 1024 / 1024),
))
# Log the current arguments (in yaml because it's cleaner)
print('\nINPUT ARGUMENTS')
print("---------------")
print(yaml.dump(vars(arguments)))
# Display script version info
print("SCRIPT")
print("------")
print("git commit: {}".format(__get_commit(path_to_git_folder=os.path.dirname(script))))
print("git origin: {}".format(__get_git_origin(path_to_git_folder=os.path.dirname(script))))
print("Copying script to output folder...")
if os.path.isdir(script):
print("Input folder is a directory (not a file). Skipping copy.")
else:
try:
# Copy the script and record the new location
script_copy = os.path.abspath(shutil.copy(script, arguments.path_output))
print("{} -> {}".format(script, script_copy))
script = script_copy
except shutil.SameFileError:
print("Input and output folder are the same. Skipping copy.")
pass
print("Setting execute permissions for script file {} ...".format(arguments.script))
script_stat = os.stat(script)
os.chmod(script, script_stat.st_mode | S_IEXEC)
# Display data version info
print("\nDATA")
print("----")
print("git commit: {}".format(__get_commit(path_to_git_folder=path_data)))
print("git origin: {}\n".format(__get_git_origin(path_to_git_folder=path_data)))
# Find subjects and process inclusion/exclusions
subject_dirs = []
subject_flat_dirs = [f for f in os.listdir(path_data) if f.startswith(arguments.subject_prefix)]
for isub in subject_flat_dirs:
# Only consider folders
if os.path.isdir(os.path.join(path_data, isub)):
session_dirs = [f for f in os.listdir(os.path.join(path_data, isub)) if f.startswith('ses-')]
if not session_dirs:
# There is no session folder, so we consider only sub- directory: sub-XX
subject_dirs.append(isub)
else:
# There is a session folder, so we concatenate: sub-XX/ses-YY
session_dirs.sort()
for isess in session_dirs:
subject_dirs.append(os.path.join(isub, isess))
# Handle inclusion lists
assert not ((arguments.include is not None) and (arguments.include_list is not None)),\
'Only one of `include` and `include-list` can be used'
if arguments.include is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.include, f) is not None]
if arguments.include_list is not None:
# TODO decide if we should warn users if one of their inclusions isn't around
subject_dirs = [f for f in subject_dirs if f in arguments.include_list]
# Handle exclusions
assert not ((arguments.exclude is not None) and (arguments.exclude_list is not None)),\
'Only one of `exclude` and `exclude-list` can be used'
if arguments.exclude is not None:
subject_dirs = [f for f in subject_dirs if re.search(arguments.exclude, f) is None]
if arguments.exclude_list is not None:
subject_dirs = [f for f in subject_dirs if f not in arguments.exclude_list]
# Determine the number of jobs we can run simultaneously
if arguments.jobs < 1:
jobs = multiprocessing.cpu_count() + arguments.jobs
else:
jobs = arguments.jobs
print("RUNNING")
print("-------")
print("Processing {} subjects in parallel. (Worker processes used: {}).".format(len(subject_dirs), jobs))
# Run the jobs, recording start and end times
start = datetime.datetime.now()
# Trap errors to send an email if a script fails.
try:
with multiprocessing.Pool(jobs) as p:
run_single_dir = functools.partial(run_single,
script=script,
script_args=arguments.script_args,
path_segmanual=path_segmanual,
path_data=path_data,
path_data_processed=path_data_processed,
path_results=path_results,
path_log=path_log,
path_qc=path_qc,
itk_threads=arguments.itk_threads,
continue_on_error=arguments.continue_on_error)
results = list(p.imap(run_single_dir, subject_dirs))
except Exception as e:
if do_email:
message = ('Oh no there has been the following error in your pipeline:\n\n'
'{}'.format(e))
try:
# I consider the multiprocessing error more significant than a potential email error, this
# ensures that the multiprocessing error is signalled.
send_notification('sct_run_batch errored', message)
except Exception:
raise e
raise e
else:
raise e
end = datetime.datetime.now()
# Check for failed subjects
fails = [sd for (sd, ret) in zip(subject_dirs, results) if ret.returncode != 0]
if len(fails) == 0:
status_message = '\nHooray! your batch completed successfully :-)\n'
else:
status_message = ('\nYour batch completed but some subjects may have not completed '
'successfully, please consult the logs for:\n'
'{}\n'.format('\n'.join(fails)))
print(status_message)
# Display timing
duration = end - start
timing_message = ('Started: {} | Ended: {} | Duration: {}\n'.format(
start.strftime('%Hh%Mm%Ss'),
end.strftime('%Hh%Mm%Ss'),
(datetime.datetime.utcfromtimestamp(0) + duration).strftime('%Hh%Mm%Ss')))
print(timing_message)
if do_email:
send_notification('sct_run_batch: Run completed',
status_message + timing_message)
display_open(file=os.path.join(path_qc, "index.html"),
message="To open the Quality Control (QC) report in a web-browser")
if arguments.zip:
file_zip = 'sct_run_batch_{}'.format(time.strftime('%Y%m%d%H%M%S'))
path_tmp = os.path.join(tempfile.mkdtemp(), file_zip)
os.makedirs(os.path.join(path_tmp, file_zip))
for folder in [path_log, path_qc, path_results]:
shutil.copytree(folder, os.path.join(path_tmp, file_zip, os.path.split(folder)[-1]))
shutil.make_archive(os.path.join(path_output, file_zip), 'zip', path_tmp)
shutil.rmtree(path_tmp)
print("\nOutput zip archive: {}.zip".format(os.path.join(path_output, file_zip)))
reset_streams()
batch_log.close()
|
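The config handling at the top of this pair is a pattern worth isolating: parse once, load the file named by -config, drop keys the parser does not know, feed the rest back in via parser.set_defaults(), then re-parse so explicit command-line flags still win. A self-contained sketch of that pattern (option names and the config file are illustrative):

import argparse
import json

with open('run.json', 'w') as f:                 # stand-in config file
    json.dump({'jobs': 4, 'unknown_key': True}, f)

parser = argparse.ArgumentParser()
parser.add_argument('-config')
parser.add_argument('-jobs', type=int, default=1)
args = parser.parse_args(['-config', 'run.json'])

with open(args.config) as conf:
    config = json.load(conf)
for k in set(config) - set(vars(args)):
    del config[k]                                # ignore keys argparse does not know
parser.set_defaults(**config)
args = parser.parse_args(['-config', 'run.json'])
assert args.jobs == 4                            # default overridden by the config file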
28,017 | def __convert_reports(reports: List[Report],
repo_dir: Union[str, None],
report_url: Union[str, None],
changed_files: List[str],
changed_file_path: Union[str, None],
severity_map: Dict[str, str]) -> Dict:
"""Convert the given reports to gerrit json format.
This function will convert the given report to Gerrit json format.
    reports - list of reports coming from a plist file or
from the CodeChecker server (both types can be processed)
repo_dir - Root directory of the sources, i.e. the directory where the
repository was cloned.
report_url - URL where the report can be found something like this:
"http://jenkins_address/userContent/$JOB_NAME/$BUILD_NUM/index.html"
changed_files - list of the changed files
severity_map
"""
review_comments = {}
report_count = 0
for report in reports:
bug_line = report.line
bug_col = report.col
check_name = report.check_name
severity = severity_map.get(check_name, "UNSPECIFIED")
file_name = report.file_path
check_msg = report.description
source_line = report.line
# Skip the report if it is not in the changed files.
if changed_file_path and not \
any([file_name.endswith(c) for c in changed_files]):
continue
report_count += 1
# file_name can be without a path in the report.
rel_file_path = os.path.relpath(file_name, repo_dir) \
if repo_dir and os.path.dirname(file_name) !="" else file_name
checked_file = rel_file_path \
+ ':' + str(bug_line) + ":" + str(bug_col)
if rel_file_path not in review_comments:
review_comments[rel_file_path] = []
review_comment_msg = "[{0}] {1}: {2} [{3}]\n{4}".format(
severity, checked_file, check_msg, check_name, source_line)
review_comments[rel_file_path].append({
"range": {
"start_line": bug_line,
"start_character": bug_col,
"end_line": bug_line,
"end_character": bug_col},
"message": review_comment_msg})
message = "CodeChecker found {0} issue(s) in the code.".format(
report_count)
if report_url:
message += " See: '{0}'".format(report_url)
review = {"tag": "jenkins",
"message": message,
"labels": {
"Code-Review": -1 if report_count else 1,
"Verified": -1 if report_count else 1},
"comments": review_comments}
return review
| def __convert_reports(reports: List[Report],
repo_dir: Union[str, None],
report_url: Union[str, None],
changed_files: List[str],
changed_file_path: Union[str, None],
severity_map: Dict[str, str]) -> Dict:
"""Convert the given reports to gerrit json format.
This function will convert the given report to Gerrit json format.
    reports - list of reports coming from a plist file or
from the CodeChecker server (both types can be processed)
repo_dir - Root directory of the sources, i.e. the directory where the
repository was cloned.
report_url - URL where the report can be found something like this:
"http://jenkins_address/userContent/$JOB_NAME/$BUILD_NUM/index.html"
changed_files - list of the changed files
severity_map
"""
review_comments = {}
report_count = 0
for report in reports:
bug_line = report.line
bug_col = report.col
check_name = report.check_name
severity = severity_map.get(check_name, "UNSPECIFIED")
file_name = report.file_path
check_msg = report.description
source_line = report.line
# Skip the report if it is not in the changed files.
if changed_file_path and not \
any([file_name.endswith(c) for c in changed_files]):
continue
report_count += 1
# file_name can be without a path in the report.
rel_file_path = os.path.relpath(file_name, repo_dir) \
if repo_dir and os.path.dirname(file_name) != "" else file_name
checked_file = rel_file_path \
+ ':' + str(bug_line) + ":" + str(bug_col)
if rel_file_path not in review_comments:
review_comments[rel_file_path] = []
review_comment_msg = "[{0}] {1}: {2} [{3}]\n{4}".format(
severity, checked_file, check_msg, check_name, source_line)
review_comments[rel_file_path].append({
"range": {
"start_line": bug_line,
"start_character": bug_col,
"end_line": bug_line,
"end_character": bug_col},
"message": review_comment_msg})
message = "CodeChecker found {0} issue(s) in the code.".format(
report_count)
if report_url:
message += " See: '{0}'".format(report_url)
review = {"tag": "jenkins",
"message": message,
"labels": {
"Code-Review": -1 if report_count else 1,
"Verified": -1 if report_count else 1},
"comments": review_comments}
return review
|
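For context, the dict assembled above matches the shape Gerrit's "set review" REST endpoint expects: a tag, an overall message, label votes, and per-file inline comments with character ranges. A tiny sketch of one such payload (the file name, checker and line numbers are made up):

import json

report_count = 1
review = {
    "tag": "jenkins",
    "message": "CodeChecker found 1 issue(s) in the code.",
    "labels": {"Code-Review": -1 if report_count else 1,
               "Verified": -1 if report_count else 1},
    "comments": {
        "lib/parser.cpp": [{
            "range": {"start_line": 42, "start_character": 7,
                      "end_line": 42, "end_character": 7},
            "message": "[HIGH] lib/parser.cpp:42:7: Division by zero [core.DivideZero]\n42",
        }],
    },
}
print(json.dumps(review, indent=2))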
1,391 | def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
zero_division : string or int, default="warn"
Sets the behavior when there is a zero division. If set to
("warn"|0)/1, returns 0/1 when both precision and recall are zero
(calculated using the same value for this parameter).
If ``zero_division != "warn"``, warnings are suppressed
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See also
--------
fbeta_score, precision_recall_fscore_support, jaccard_similarity_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight,
zero_division=zero_division)
| def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None, zero_division="warn"):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the average of
the F1 score of each class with weighting depending on the ``average``
parameter.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
zero_division : "warn", 0 or 1
Sets the behavior when there is a zero division. If set to
("warn"|0)/1, returns 0/1 when both precision and recall are zero
(calculated using the same value for this parameter).
If ``zero_division != "warn"``, warnings are suppressed
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
See also
--------
fbeta_score, precision_recall_fscore_support, jaccard_similarity_score,
multilabel_confusion_matrix
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([0.8, 0. , 0. ])
>>> y_true = [0, 0, 0, 0, 0, 0]
>>> y_pred = [0, 0, 0, 0, 0, 0]
>>> f1_score(y_true, y_pred, zero_division=1)
1.0...
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight,
zero_division=zero_division)
|
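A worked version of the docstring's macro-average example may help: per-class F1 is 2*P*R/(P+R), and in that example only class 0 is ever predicted correctly, so the macro average is 0.8/3, about 0.267.

y_true = [0, 1, 2, 0, 1, 2]
y_pred = [0, 2, 1, 0, 0, 1]

def f1_for(c):
    tp = sum(t == c == p for t, p in zip(y_true, y_pred))
    fp = sum(p == c != t for t, p in zip(y_true, y_pred))
    fn = sum(t == c != p for t, p in zip(y_true, y_pred))
    if tp == 0:
        return 0.0
    precision, recall = tp / (tp + fp), tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)

per_class = [f1_for(c) for c in (0, 1, 2)]   # [0.8, 0.0, 0.0]
macro = sum(per_class) / 3                   # 0.266..., as in the docstring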
17,606 | def brave(query, lang):
# brave search autocompleter
url = 'https://search.brave.com/api/suggest?{query}'
resp = get(url.format(query=urlencode({'q': query})))
results = []
if resp.ok:
data = loads(resp.text)
for suggeestion in data[1]:
results.append(suggestion)
return results
| def brave(query, lang):
# brave search autocompleter
url = 'https://search.brave.com/api/suggest?{query}'
resp = get(url.format(query=urlencode({'q': query})))
results = []
if resp.ok:
data = loads(resp.text)
for suggestion in data[1]:
results.append(suggestion)
return results
|
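Beyond the loop-variable typo fixed on the right (`suggeestion` raises NameError on the first result), the parsing itself is short enough to show in isolation; the payload shape is assumed to follow the common OpenSearch suggestion format, [query, [suggestion, ...], ...]:

from json import loads

body = '["pyth", ["python", "python download", "python 3.10"]]'
data = loads(body)
results = list(data[1]) if len(data) > 1 else []
assert results[0] == "python"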
32,333 | def split_context_path(path: str) -> Tuple[List[str], str]:
"""
Split a context path separated by a dot with a replacement name
    following a comma into the key tree and the replacement name.
:param path: The context path (with a optional replacement name)
:return: The key tree and the replacement name.
"""
key_tree = []
key = []
itr = iter(path)
for c in itr:
if c == '\\':
try:
key.append(next(itr))
except StopIteration:
key.append('\\')
elif c == '.':
key_tree.append(''.join(key))
key = []
else:
key.append(c)
names = ''.join(key).rsplit(',', 1)
if len(names) == 2:
key_tree.append(names[0])
return key_tree, names[1]
elif len(names) == 1:
key_tree.append(names[0])
return key_tree, names[0]
else:
raise ValueError(f'Invalid path: {path}')
| def split_context_path(path: str) -> Tuple[List[str], str]:
"""
Split a context path separated by a dot with a replacement name
    following a comma into the key tree and the replacement name.
:param path: The context path (with an optional replacement name)
:return: The key tree and the replacement name.
"""
key_tree = []
key = []
itr = iter(path)
for c in itr:
if c == '\\':
try:
key.append(next(itr))
except StopIteration:
key.append('\\')
elif c == '.':
key_tree.append(''.join(key))
key = []
else:
key.append(c)
names = ''.join(key).rsplit(',', 1)
if len(names) == 2:
key_tree.append(names[0])
return key_tree, names[1]
elif len(names) == 1:
key_tree.append(names[0])
return key_tree, names[0]
else:
raise ValueError(f'Invalid path: {path}')
|
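The escaping and ",name" rules in the pair above are easiest to see with a few inputs; the expected outputs below assume split_context_path exactly as defined in the modified version.

# Assuming split_context_path as defined in the modified version above:
print(split_context_path('Account.Email.Address,Email'))
# (['Account', 'Email', 'Address'], 'Email')

print(split_context_path('Account.Email.Address'))
# (['Account', 'Email', 'Address'], 'Address')   # no ",name": the last key doubles as the name

print(split_context_path(r'a\.b.c'))
# (['a.b', 'c'], 'c')                            # backslash escapes a literal dot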
13,690 | def _check_user_auth_flow(site, user):
"""
Check if user belongs to an allowed domain and not whitelisted
then ask user to login through allowed domain SSO provider.
"""
if user and ENABLE_LOGIN_USING_THIRDPARTY_AUTH_ONLY.is_enabled():
allowed_domain = site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_DOMAIN', '').lower()
email_parts = user.email.split('@')
if len(email_parts) != 2:
log.warn("User %s has nonstandard e-mail. Shortcircuting THIRD_PART_AUTH_ONLY_DOMAIN check.", user.id)
return
user_domain = email_parts[1].strip().lower()
# If user belongs to allowed domain and not whitelisted then user must login through allowed domain SSO
if user_domain == allowed_domain and not AllowedAuthUser.objects.filter(site=site, email=user.email).exists():
msg = Text(_(
u'As {allowed_domain} user, You must login with your {allowed_domain} '
u'{link_start}{provider} account{link_end}.'
)).format(
allowed_domain=allowed_domain,
link_start=HTML("<a href='{tpa_provider_link}'>").format(
tpa_provider_link='{dashboard_url}?tpa_hint={tpa_hint}'.format(
dashboard_url=reverse('dashboard'),
tpa_hint=site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_HINT'),
)
),
provider=site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_PROVIDER'),
link_end=HTML("</a>")
)
raise AuthFailedError(msg)
| def _check_user_auth_flow(site, user):
"""
Check if user belongs to an allowed domain and not whitelisted
then ask user to login through allowed domain SSO provider.
"""
if user and ENABLE_LOGIN_USING_THIRDPARTY_AUTH_ONLY.is_enabled():
allowed_domain = site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_DOMAIN', '').lower()
email_parts = user.email.split('@')
if len(email_parts) != 2:
log.warn("User %s has nonstandard e-mail. Shortcircuiting THIRD_PART_AUTH_ONLY_DOMAIN check.", user.id)
return
user_domain = email_parts[1].strip().lower()
# If user belongs to allowed domain and not whitelisted then user must login through allowed domain SSO
if user_domain == allowed_domain and not AllowedAuthUser.objects.filter(site=site, email=user.email).exists():
msg = Text(_(
u'As {allowed_domain} user, You must login with your {allowed_domain} '
u'{link_start}{provider} account{link_end}.'
)).format(
allowed_domain=allowed_domain,
link_start=HTML("<a href='{tpa_provider_link}'>").format(
tpa_provider_link='{dashboard_url}?tpa_hint={tpa_hint}'.format(
dashboard_url=reverse('dashboard'),
tpa_hint=site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_HINT'),
)
),
provider=site.configuration.get_value('THIRD_PARTY_AUTH_ONLY_PROVIDER'),
link_end=HTML("</a>")
)
raise AuthFailedError(msg)
|
7,366 | def _flip(arr, axes=None):
""" Reverse array over many axes. Generalization of arr[::-1] for many
dimensions. If `axes` is `None`, flip along all axes. """
if axes is None:
reverse = [slice(None, None, -1)] * arr.ndim
else:
reverse = [slice(None, None, None)] * arr.ndim
for axis in axes:
reverse[axis] = slice(None, None, -1)
return arr[tuple(reverse)]
| def _flip(arr, axes=None):
"""Reverse array over many axes. Generalization of arr[::-1] for many
dimensions. If `axes` is `None`, flip along all axes. """
if axes is None:
reverse = [slice(None, None, -1)] * arr.ndim
else:
reverse = [slice(None, None, None)] * arr.ndim
for axis in axes:
reverse[axis] = slice(None, None, -1)
return arr[tuple(reverse)]
|
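The tuple-of-slices trick above generalizes arr[::-1] to any subset of axes. A small NumPy check (with _flip restated for convenience) shows it matches np.flip:

import numpy as np

def _flip(arr, axes=None):
    # slice(None, None, -1) is the "::-1" step applied per axis
    if axes is None:
        reverse = [slice(None, None, -1)] * arr.ndim
    else:
        reverse = [slice(None, None, None)] * arr.ndim
        for axis in axes:
            reverse[axis] = slice(None, None, -1)
    return arr[tuple(reverse)]

a = np.arange(12).reshape(3, 4)
assert np.array_equal(_flip(a), np.flip(a))                     # all axes
assert np.array_equal(_flip(a, axes=[1]), np.flip(a, axis=1))   # single axis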
40,054 | def disassemble(bytecode: str) -> list:
"""Disassembles evm bytecode and returns a list of instructions"""
instruction_list = []
address = 0
length = len(bytecode)
if "bzzr" in str(bytecode[-43:]):
# ignore swarm hash
length -= 43
while address < length:
try:
op_code = opcodes[bytecode[address]]
except KeyError:
instruction_list.append(EvmInstruction(address, "INVALID"))
address += 1
continue
op_code_name = op_code[0]
current_instruction = EvmInstruction(address, op_code_name)
match = re.search(regex_PUSH, op_code_name)
if match:
argument_bytes = bytecode[address + 1: address + 1 + int(match.group(1))]
current_instruction.argument = "0x" + argument_bytes.hex()
address += int(match.group(1))
instruction_list.append(current_instruction)
address += 1
# We use a to_dict() here for compatibility reasons
return list(map(lambda element: element.to_dict(), instruction_list))
| def disassemble(bytecode: str) -> list:
"""Disassembles evm bytecode and returns a list of instructions"""
instruction_list = []
address = 0
length = len(bytecode)
if "bzzr" in str(bytecode[-43:]):
# ignore swarm hash
length -= 43
while address < length:
try:
op_code = opcodes[bytecode[address]]
except KeyError:
instruction_list.append(EvmInstruction(address, "INVALID"))
address += 1
continue
op_code_name = op_code[0]
current_instruction = EvmInstruction(address, op_code_name)
match = re.search(regex_PUSH, op_code_name)
if match:
argument_bytes = bytecode[address + 1: address + 1 + int(match.group(1))]
current_instruction.argument = "0x" + argument_bytes.hex()
address += int(match.group(1))
instruction_list.append(current_instruction)
address += 1
# We use a to_dict() here for compatibility reasons
return [element.to_dict() for element in instruction_list]
|
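Besides running while address stays below the swarm-hash cutoff, the change in this pair is purely stylistic: list(map(lambda ...)) becomes a list comprehension. The two spellings are equivalent, as a tiny stand-in class shows (the name EvmInstruction is reused here purely for illustration, not the real mythril class).

from dataclasses import dataclass

@dataclass
class EvmInstruction:          # minimal stand-in for illustration only
    address: int
    op_code: str

    def to_dict(self):
        return {"address": self.address, "opcode": self.op_code}

instructions = [EvmInstruction(0, "PUSH1"), EvmInstruction(2, "STOP")]
assert list(map(lambda e: e.to_dict(), instructions)) == [e.to_dict() for e in instructions]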
57,832 | def main():
emails = argToList(demisto.args().get('input'))
list_results = [email_address for email_address in emails if verify_is_email(email_address)]
if list_results:
demisto.results(list_results)
else:
demisto.results('')
| def main():
emails = argToList(demisto.args().get('input'))
list_results = [email_address for email_address in emails if verify_is_email(email_address)]
if list_results:
return_results(list_results)
else:
return_results('')
|
8,659 | def main(argv=None):
try:
# Step One: Parse The Command Line
parser = build_parser()
opts = parser.parse_args(argv or None)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
stderr('%s' % err)
return 1
# Step Three: No-config required options
if opts.version:
print_version()
return
if opts.wizard:
_wizard('all', opts.config)
return
if opts.mod_wizard:
_wizard('mod', opts.config)
return
if opts.list_configs:
print_config()
return
# Step Four: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except ConfigurationError as e:
stderr(e)
return 2
if config_module.core.not_configured:
stderr('Bot is not configured, can\'t start')
# exit with code 2 to prevent auto restart on fail by systemd
return 2
# Step Five: Manage logfile, stdout and stderr
logfile = os.path.os.path.join(config_module.core.logdir, 'stdio.log')
sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)
sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)
# Step Six: Handle --quit, --kill and saving the PID to file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill:
stderr('There\'s already a Sopel instance running with this config file')
stderr('Try using the --quit or the --kill options')
return 1
elif opts.kill:
stderr('Killing the sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
os.kill(old_pid, signal.SIGTERM)
return
elif opts.kill or opts.quit:
stderr('Sopel is not running!')
return 1
if opts.daemonize:
child_pid = os.fork()
if child_pid is not 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Seven: Initialise And Run sopel
run(config_module, pid_file_path)
except KeyboardInterrupt:
print("\n\nInterrupted")
return 1
| def main(argv=None):
try:
# Step One: Parse The Command Line
parser = build_parser()
opts = parser.parse_args(argv or None)
# Step Two: "Do not run as root" checks
try:
check_not_root()
except RuntimeError as err:
stderr('%s' % err)
return 1
# Step Three: No-config required options
if opts.version:
print_version()
return
if opts.wizard:
_wizard('all', opts.config)
return
if opts.mod_wizard:
_wizard('mod', opts.config)
return
if opts.list_configs:
print_config()
return
# Step Four: Get the configuration file and prepare to run
try:
config_module = get_configuration(opts)
except ConfigurationError as e:
stderr(e)
return 2
if config_module.core.not_configured:
stderr('Bot is not configured, can\'t start')
# exit with code 2 to prevent auto restart on fail by systemd
return 2
# Step Five: Manage logfile, stdout and stderr
logfile = os.path.os.path.join(config_module.core.logdir, 'stdio.log')
sys.stderr = tools.OutputRedirect(logfile, True, opts.quiet)
sys.stdout = tools.OutputRedirect(logfile, False, opts.quiet)
# Step Six: Handle --quit, --kill and saving the PID to file
pid_dir = config_module.core.pid_dir
pid_file_path = get_pid_filename(opts, pid_dir)
old_pid = get_running_pid(pid_file_path)
if old_pid is not None and tools.check_pid(old_pid):
if not opts.quit and not opts.kill:
stderr('There\'s already a Sopel instance running with this config file')
stderr('Try using the --quit or the --kill options')
return 1
elif opts.kill:
stderr('Killing the Sopel')
os.kill(old_pid, signal.SIGKILL)
return
elif opts.quit:
stderr('Signaling Sopel to stop gracefully')
if hasattr(signal, 'SIGUSR1'):
os.kill(old_pid, signal.SIGUSR1)
else:
os.kill(old_pid, signal.SIGTERM)
return
elif opts.kill or opts.quit:
stderr('Sopel is not running!')
return 1
if opts.daemonize:
child_pid = os.fork()
if child_pid is not 0:
return
with open(pid_file_path, 'w') as pid_file:
pid_file.write(str(os.getpid()))
# Step Seven: Initialise And Run sopel
run(config_module, pid_file_path)
except KeyboardInterrupt:
print("\n\nInterrupted")
return 1
|
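One detail neither side of the pair above touches: child_pid is not 0 tests identity rather than equality. CPython's small-integer caching makes it work in practice, but it raises a SyntaxWarning on Python 3.8+ and the intended test is numeric, as sketched below (the os.fork() context is only assumed here).

child_pid = 0            # os.fork() returns 0 in the child process
if child_pid != 0:       # equality, not identity, is the intended check
    raise SystemExit(0)  # parent exits, child keeps running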
875 | def test_sympy__stats__frv_types__RobustSolitonDistribution():
from sympy.stats.frv_types import RobustSolitonDistribution
assert _test_args(RobustSolitonDistribution(1000,0.5,0.1))
| def test_sympy__stats__frv_types__RobustSolitonDistribution():
from sympy.stats.frv_types import RobustSolitonDistribution
assert _test_args(RobustSolitonDistribution(1000, 0.5, 0.1))
|
49,543 | def test_get_agent_with_different_registries(application_economics, agency, test_registry, agency_local_registry):
# Get agents using same registry instance
staking_agent_1 = ContractAgency.get_agent(PREApplicationAgent, registry=test_registry)
staking_agent_2 = ContractAgency.get_agent(PREApplicationAgent, registry=test_registry)
assert staking_agent_2.registry_str == staking_agent_1.registry_str == str(test_registry)
assert staking_agent_2 is staking_agent_1
# Same content but different classes of registries
staking_agent_2 = ContractAgency.get_agent(PREApplicationAgent, registry=agency_local_registry)
assert staking_agent_2.registry_str == str(test_registry)
assert staking_agent_2 is staking_agent_1
| def test_get_agent_with_different_registries(application_economics, agency, test_registry, agency_local_registry):
# Get agents using same registry instance
application_agent_1 = ContractAgency.get_agent(PREApplicationAgent, registry=test_registry)
application_agent_2 = ContractAgency.get_agent(PREApplicationAgent, registry=test_registry)
assert staking_agent_2.registry_str == staking_agent_1.registry_str == str(test_registry)
assert staking_agent_2 is staking_agent_1
# Same content but different classes of registries
staking_agent_2 = ContractAgency.get_agent(PREApplicationAgent, registry=agency_local_registry)
assert staking_agent_2.registry_str == str(test_registry)
assert staking_agent_2 is staking_agent_1
|
30,513 | def get_feeds() -> set:
"""Return all enabled modules
@return:
A set with feed names
"""
modules = demisto.getModules() # type: ignore # pylint: disable=E1101
return {module_details["brand"] for instance_name, module_details in modules.items() if # pylint: disable=E1101
active_feed(module_details)}
| def get_feeds() -> set:
"""Return all enabled modules
@return:
A set with feed names
"""
modules = demisto.getModules() # type: ignore # pylint: disable=E1101
return {module_details["brand"] for module_details in modules.values() if # pylint: disable=E1101
active_feed(module_details)}
|
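The refactor above drops an unused key by iterating .values() instead of .items(). A minimal sketch with a hypothetical modules dict (shaped the way the snippet assumes) shows the two comprehensions yield the same set:

# Hypothetical module registry shaped like the snippet above assumes.
modules = {
    "inst_1": {"brand": "FeedA", "state": "active"},
    "inst_2": {"brand": "FeedB", "state": "active"},
}

via_items = {details["brand"] for _name, details in modules.items()}
via_values = {details["brand"] for details in modules.values()}
assert via_items == via_values == {"FeedA", "FeedB"}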
57,936 | def build_attribute_feed_hit(event: dict, demisto_args=None):
""" We want to have the Attribute data as part of the search-events results only if the user asked for
include_feed_correlations. Otherwise, we don't want to return attributes data at all."""
if demisto_args and argToBoolean(demisto_args.get('include_feed_correlations', False)):
if event.get('Attribute'):
event['Attribute'] = [
{
'id': attribute.get('id'),
'value': attribute.get('value'),
'Feed': attribute.get('Feed')
} for attribute in event['Attribute']
]
else:
event.pop('Attribute')
| def build_attribute_feed_hit(event: dict, demisto_args=None):
"""
We want to have the attributes data as part of the search-events context results only if the user asked for
include_feed_correlations.
Otherwise, we don't want to return attributes data at all.
"""
if demisto_args and argToBoolean(demisto_args.get('include_feed_correlations', False)):
if event.get('Attribute'):
event['Attribute'] = [
{
'id': attribute.get('id'),
'value': attribute.get('value'),
'Feed': attribute.get('Feed')
} for attribute in event['Attribute']
]
else:
event.pop('Attribute')
|
14,395 | def lambert_porkchop(dpt_body, arr_body, dpt_t, arr_t):
"""
This function returns the increment in departure and arrival velocities.
"""
# Compute departure and arrival positions
rr_dpt_body, vv_dpt_body = coord.get_body_barycentric_posvel(dpt_body.name, dpt_t)
rr_arr_body, vv_arr_body = coord.get_body_barycentric_posvel(arr_body.name, arr_t)
# Compute time of flight
tof = arr_t - dpt_t
if tof <= 0:
return None, None, None, None, None
try:
(v_dpt, v_arr), = lambert(Sun.k, rr_dpt_body.xyz, rr_arr_body.xyz, tof)
# Compute all the output variables
dv_dpt = norm(v_dpt - vv_dpt_body.xyz)
dv_arr = norm(v_arr - vv_arr_body.xyz)
c3_launch = dv_dpt.value ** 2
c3_arrival = dv_arr.value ** 2
return dv_dpt.value, dv_arr.value, c3_launch, c3_arrival, tof.jd
except AssertionError:
return None, None, None, None, None
| def _lambert_porkchop(dpt_body, arr_body, dpt_t, arr_t):
"""
This function returns the increment in departure and arrival velocities.
"""
# Compute departure and arrival positions
rr_dpt_body, vv_dpt_body = coord.get_body_barycentric_posvel(dpt_body.name, dpt_t)
rr_arr_body, vv_arr_body = coord.get_body_barycentric_posvel(arr_body.name, arr_t)
# Compute time of flight
tof = arr_t - dpt_t
if tof <= 0:
return None, None, None, None, None
try:
(v_dpt, v_arr), = lambert(Sun.k, rr_dpt_body.xyz, rr_arr_body.xyz, tof)
# Compute all the output variables
dv_dpt = norm(v_dpt - vv_dpt_body.xyz)
dv_arr = norm(v_arr - vv_arr_body.xyz)
c3_launch = dv_dpt.value ** 2
c3_arrival = dv_arr.value ** 2
return dv_dpt.value, dv_arr.value, c3_launch, c3_arrival, tof.jd
except AssertionError:
return None, None, None, None, None
|
14,624 | def add_unseen_labels(train_label_dict, test_label_list):
"""
Merge test set labels that not seen in the training data with seen ones.
Parameters
----------
train_label_dict : dict
Dictionary mapping training set class labels to class indices.
test_label_list : list
List containing labels in the test set.
Returns
-------
train_and_test_label_dict : dict
Dictionary mapping merged lables from both the training and test set
to indices.
"""
# get the list of labels that were in the training set
train_label_list = list(train_label_dict.keys())
# identify any unseen labels in the test set
unseen_test_label_list = [label for label in test_label_list
if label not in train_label_list]
# create a new dictionary for these unseen labels with label indices
# for them starting _after_ those for the training set labels
unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list,
start=len(train_label_list))}
# combine the train label dictionary with this unseen label one & return
train_and_test_label_dict = train_label_dict.copy()
train_and_test_label_dict.update(unseen_label_dict)
return train_and_test_label_dict
| def add_unseen_labels(train_label_dict, test_label_list):
"""
Merge test set labels that not seen in the training data with seen ones.
Parameters
----------
train_label_dict : dict
Dictionary mapping training set class labels to class indices.
test_label_list : list
List containing labels in the test set.
Returns
-------
train_and_test_label_dict : dict
Dictionary mapping merged lables from both the training and test set
to indices.
"""
# get the list of labels that were in the training set
train_label_list = list(train_label_dict.keys())
# identify any unseen labels in the test set
unseen_test_label_list = [label for label in test_label_list
if label not in train_label_list]
# create a new dictionary for these unseen labels with label indices
# for them starting _after_ those for the training set labels
unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list,
start=len(train_label_list))}
# combine the train label dictionary with this unseen label one & return
train_and_test_label_dict = train_label_dict.copy()
train_and_test_label_dict.update(unseen_label_dict)
return train_and_test_label_dict
|
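A condensed restatement of the merge logic above, with a worked example: indices for unseen test labels start right after the training-set indices. This is a sketch for illustration, not the project's own helper.

def merge_unseen_labels(train_label_dict, test_label_list):
    # condensed version of add_unseen_labels above, for illustration only
    unseen = [label for label in test_label_list if label not in train_label_dict]
    merged = dict(train_label_dict)
    merged.update({label: i for i, label in enumerate(unseen, start=len(train_label_dict))})
    return merged

print(merge_unseen_labels({"cat": 0, "dog": 1}, ["dog", "bird", "fish"]))
# {'cat': 0, 'dog': 1, 'bird': 2, 'fish': 3}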
7,516 | def download_file(remote_url, cache=False, show_progress=True, timeout=None,
sources=None, pkgname='astropy', http_headers=None,
ftp_tls=False):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds. Otherwise, use
`astropy.utils.data.Conf.remote_timeout`. Set this to zero to prevent
any attempt to download anything.
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ftp_tls : bool
If True, use TLS with ftp URLs instead of the standard unsecured FTP.
Certain servers require this.
Returns
-------
local_path : str
Returns the local path that the file was download to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because `download_file` returns a filename, another process could run
clear_download_cache before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {'User-Agent': conf.default_http_user_agent,
'Accept': '*/*'}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
cache = False
missing_cache = (
"Cache directory cannot be read or created, "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(f"Cache value '{cache}' was requested but "
f"'update' is the only recognized string; "
f"otherwise use a boolean")
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=ftp_tls)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (hasattr(e, 'reason')
and hasattr(e.reason, 'errno')
and e.reason.errno == 8):
e.reason.strerror = (e.reason.strerror +
'. requested URL: '
+ remote_url)
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
except socket.timeout as e:
# this isn't supposed to happen, but occasionally a socket.timeout
# gets through. It's supposed to be caught in urllib and raised
# in this way, but for some reason in mysterious circumstances it
# doesn't (or didn't in python2?). So we'll just re-raise it here
# instead.
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
f"Please include primary URL in sources if you want it to be "
f"included as a valid source.")
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}") \
from errors[sources[0]]
if cache:
try:
return import_file_to_cache(url_key, f_name,
remove_original=True,
replace=(cache == 'update'),
pkgname=pkgname)
except PermissionError:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only, unable to import "
f"downloaded file, providing data in temporary file {f_name} "
f"instead.")
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
| def download_file(remote_url, cache=False, show_progress=True, timeout=None,
sources=None, pkgname='astropy', http_headers=None,
ftp_tls=False):
"""Downloads a URL and optionally caches the result.
It returns the filename of a file containing the URL's contents.
If ``cache=True`` and the file is present in the cache, just
returns the filename; if the file had to be downloaded, add it
to the cache. If ``cache="update"`` always download and add it
to the cache.
The cache is effectively a dictionary mapping URLs to files; by default the
file contains the contents of the URL that is its key, but in practice
these can be obtained from a mirror (using ``sources``) or imported from
the local filesystem (using `~import_file_to_cache` or
`~import_download_cache`). Regardless, each file is regarded as
representing the contents of a particular URL, and this URL should be used
to look them up or otherwise manipulate them.
The files in the cache directory are named according to a cryptographic
hash of their URLs (currently MD5, so hackers can cause collisions).
The modification times on these files normally indicate when they were
last downloaded from the Internet.
Parameters
----------
remote_url : str
The URL of the file to download
cache : bool or "update", optional
Whether to cache the contents of remote URLs. If "update",
always download the remote URL in case there is a new version
and store the result in the cache.
show_progress : bool, optional
Whether to display a progress bar during the download (default
is `True`). Regardless of this setting, the progress bar is only
displayed when outputting to a terminal.
timeout : float, optional
The timeout, in seconds. Otherwise, use
`astropy.utils.data.Conf.remote_timeout`. Set this to zero to prevent
any attempt to download anything.
sources : list of str, optional
If provided, a list of URLs to try to obtain the file from. The
result will be stored under the original URL. The original URL
will *not* be tried unless it is in this list; this is to prevent
long waits for a primary server that is known to be inaccessible
at the moment. If an empty list is passed, then ``download_file``
will not attempt to connect to the Internet, that is, if the file
is not in the cache a KeyError will be raised.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
http_headers : dict or None
HTTP request headers to pass into ``urlopen`` if needed. (These headers
are ignored if the protocol for the ``name_or_obj``/``sources`` entry
is not a remote HTTP URL.) In the default case (None), the headers are
``User-Agent: some_value`` and ``Accept: */*``, where ``some_value``
is set by ``astropy.utils.data.conf.default_http_user_agent``.
ftp_tls : bool
If True, use TLS with ftp URLs instead of the standard unsecured FTP.
Certain servers require this.
Returns
-------
local_path : str
Returns the local path that the file was download to.
Raises
------
urllib.error.URLError
Whenever there's a problem getting the remote file.
KeyError
When a file was requested from the cache but is missing and no
sources were provided to obtain it from the Internet.
Notes
-----
Because `download_file` returns a filename, another process could run
`clear_download_cache` before you actually open the file, leaving
you with a filename that no longer points to a usable file.
"""
if timeout is None:
timeout = conf.remote_timeout
if sources is None:
sources = [remote_url]
if http_headers is None:
http_headers = {'User-Agent': conf.default_http_user_agent,
'Accept': '*/*'}
missing_cache = ""
url_key = remote_url
if cache:
try:
dldir = _get_download_cache_loc(pkgname)
except OSError:
cache = False
missing_cache = (
"Cache directory cannot be read or created, "
"providing data in temporary file instead."
)
else:
if cache == "update":
pass
elif isinstance(cache, str):
raise ValueError(f"Cache value '{cache}' was requested but "
f"'update' is the only recognized string; "
f"otherwise use a boolean")
else:
filename = os.path.join(dldir, _url_to_dirname(url_key), "contents")
if os.path.exists(filename):
return os.path.abspath(filename)
errors = {}
for source_url in sources:
try:
f_name = _download_file_from_source(
source_url,
timeout=timeout,
show_progress=show_progress,
cache=cache,
remote_url=remote_url,
pkgname=pkgname,
http_headers=http_headers,
ftp_tls=ftp_tls)
# Success!
break
except urllib.error.URLError as e:
# errno 8 is from SSL "EOF occurred in violation of protocol"
if (hasattr(e, 'reason')
and hasattr(e.reason, 'errno')
and e.reason.errno == 8):
e.reason.strerror = (e.reason.strerror +
'. requested URL: '
+ remote_url)
e.reason.args = (e.reason.errno, e.reason.strerror)
errors[source_url] = e
except socket.timeout as e:
# this isn't supposed to happen, but occasionally a socket.timeout
# gets through. It's supposed to be caught in urllib and raised
# in this way, but for some reason in mysterious circumstances it
# doesn't (or didn't in python2?). So we'll just re-raise it here
# instead.
errors[source_url] = e
else: # No success
if not sources:
raise KeyError(
f"No sources listed and file {remote_url} not in cache! "
f"Please include primary URL in sources if you want it to be "
f"included as a valid source.")
elif len(sources) == 1:
raise errors[sources[0]]
else:
raise urllib.error.URLError(
f"Unable to open any source! Exceptions were {errors}") \
from errors[sources[0]]
if cache:
try:
return import_file_to_cache(url_key, f_name,
remove_original=True,
replace=(cache == 'update'),
pkgname=pkgname)
except PermissionError:
# Cache is readonly, we can't update it
missing_cache = (
f"Cache directory appears to be read-only, unable to import "
f"downloaded file, providing data in temporary file {f_name} "
f"instead.")
# FIXME: other kinds of cache problem can occur?
if missing_cache:
warn(CacheMissingWarning(missing_cache, f_name))
if conf.delete_temporary_downloads_at_exit:
global _tempfilestodel
_tempfilestodel.append(f_name)
return os.path.abspath(f_name)
|
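The docstring above describes astropy's public download cache; typical usage is a one-liner with cache=True to reuse a previously downloaded copy. The URL below is only a placeholder, so the call is a sketch rather than a working fetch.

from astropy.utils.data import download_file

# Placeholder URL; any reachable HTTP(S) resource works.
local_path = download_file("https://example.com/data.fits", cache=True)
with open(local_path, "rb") as fh:
    header = fh.read(80)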