id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
17,417 | def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
| def _plot1d(plotfunc):
"""
Decorator for common 1d plotting logic.
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
54,288 | def _qmu_like(mu, data, pdf, init_pars, par_bounds):
"""
Clipped version of _tmu where _qmu = 0 if muhat > 0 else _tmu
If the lower bound of the POI is 0 this automatically implments
qmu_tilde. Otherwise this is qmu (no tilde).
"""
tensorlib, optimizer = get_backend()
tmu_stat, (mubhathat, muhatbhat) = _tmu_like(
mu, data, pdf, init_pars, par_bounds, return_fitted_pars=True
)
qmu = tensorlib.where(
muhatbhat[pdf.config.poi_index] > mu, tensorlib.astensor(0.0), tmu_stat
)
return qmu
| def _qmu_like(mu, data, pdf, init_pars, par_bounds):
"""
Clipped version of _tmu where _qmu = 0 if muhat > 0 else _tmu
If the lower bound of the POI is 0 this automatically implments
qmu_tilde. Otherwise this is qmu (no tilde).
"""
tensorlib, optimizer = get_backend()
tmu_stat, (mubhathat, muhatbhat) = _tmu_like(
mu, data, pdf, init_pars, par_bounds, return_fitted_pars=True
)
qmu_like = tensorlib.where(
muhatbhat[pdf.config.poi_index] > mu, tensorlib.astensor(0.0), tmu_stat
)
return qmu_like
|
32,689 | def test_pandas_dtypes(conn_cnx):
with conn_cnx(
session_parameters={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "arrow_force"
}
) as cnx:
with cnx.cursor() as cur:
cur.execute(
"select 1::integer, 2.3::double, 'foo'::string, current_timestamp()::timestamp where 1=0"
)
batches = cur.get_result_batches()
batch = batches[0].to_pandas()
assert batch.dtypes is not None
# assert batch.empty is not True TODO
pandas_dtypes = batch.dtypes
expected_types = [np.int64, float, np.object, np.datetime64]
# pd.string is represented as an np.object
# np.dtype string is not the same as pd.string (python)
for i, typ in enumerate(expected_types):
assert_dtype_equal(pandas_dtypes[i].type, np.dtype(typ).type)
| def test_pandas_dtypes(conn_cnx):
with conn_cnx(
session_parameters={
PARAMETER_PYTHON_CONNECTOR_QUERY_RESULT_FORMAT: "arrow_force"
}
) as cnx:
with cnx.cursor() as cur:
cur.execute(
"select 1::integer, 2.3::double, 'foo'::string, current_timestamp()::timestamp where 1=0"
)
batches = cur.get_result_batches()
batch = batches[0].to_pandas()
assert batch.dtypes is not None
# TODO: assert batch.empty is not True
pandas_dtypes = batch.dtypes
expected_types = [np.int64, float, np.object, np.datetime64]
# pd.string is represented as an np.object
# np.dtype string is not the same as pd.string (python)
for i, typ in enumerate(expected_types):
assert_dtype_equal(pandas_dtypes[i].type, np.dtype(typ).type)
|
35,262 | def emd(a, b, M, numItermax=100000, log=False, center_dual=True):
r"""Solves the Earth Movers distance problem and returns the OT matrix
.. math:: \gamma = arg\min_\gamma <\gamma,M>_F
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the metric cost matrix
- a and b are the sample weights
.. warning:: Note that the M matrix in numpy needs to be a C-order
numpy.array in float64 format. It will be converted if not in this
format
.. note:: This function is backend-compatible and will work on arrays
from all compatible backends.
Uses the algorithm proposed in [1]_
Parameters
----------
a : (ns,) array-like, float
Source histogram (uniform weight if empty list)
b : (nt,) array-like, float
Target histogram (uniform weight if empty list)
M : (ns,nt) array-like, float
Loss matrix (c-order array in numpy with type float64)
numItermax : int, optional (default=100000)
The maximum number of iterations before stopping the optimization
algorithm if it has not converged.
log: bool, optional (default=False)
If True, returns a dictionary containing the cost and dual variables.
Otherwise returns only the optimal transportation matrix.
center_dual: boolean, optional (default=True)
If True, centers the dual potential using function
:ref:`center_ot_dual`.
Returns
-------
gamma: (ns x nt) array-like
Optimal transportation matrix for the given
parameters
log: dict, optional
If input log is true, a dictionary containing the
cost and dual variables and exit status
Examples
--------
Simple example with obvious solution. The function emd accepts lists and
perform automatic conversion to numpy arrays
>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.emd(a, b, M)
array([[0.5, 0. ],
[0. , 0.5]])
References
----------
.. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011,
December). Displacement interpolation using Lagrangian mass transport.
In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM.
See Also
--------
ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General
regularized OT"""
# convert to numpy if list
if type(a)==list:
a=np.array(a)
if type(b)==list:
b=np.array(b)
if type(M)==list:
M=np.array(M)
a0, b0, M0 = a, b, M
nx = get_backend(M0, a0, b0)
# convert to numpy
M = nx.to_numpy(M)
a = nx.to_numpy(a)
b = nx.to_numpy(b)
# ensure float64
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64, order='C')
# if empty array given then use uniform distributions
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
assert (a.shape[0] == M.shape[0] and b.shape[0] == M.shape[1]), \
"Dimension mismatch, check dimensions of M with a and b"
# ensure that same mass
np.testing.assert_almost_equal(a.sum(0),b.sum(0),err_msg='a and b vector must have the same sum')
b=b*a.sum()/b.sum()
asel = a != 0
bsel = b != 0
G, cost, u, v, result_code = emd_c(a, b, M, numItermax)
if center_dual:
u, v = center_ot_dual(u, v, a, b)
if np.any(~asel) or np.any(~bsel):
u, v = estimate_dual_null_weights(u, v, a, b, M)
result_code_string = check_result(result_code)
if log:
log = {}
log['cost'] = cost
log['u'] = nx.from_numpy(u, type_as=a0)
log['v'] = nx.from_numpy(v, type_as=b0)
log['warning'] = result_code_string
log['result_code'] = result_code
return nx.from_numpy(G, type_as=M0), log
return nx.from_numpy(G, type_as=M0)
| def emd(a, b, M, numItermax=100000, log=False, center_dual=True):
r"""Solves the Earth Movers distance problem and returns the OT matrix
.. math:: \gamma = arg\min_\gamma <\gamma,M>_F
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
where :
- M is the metric cost matrix
- a and b are the sample weights
.. warning:: Note that the M matrix in numpy needs to be a C-order
numpy.array in float64 format. It will be converted if not in this
format
.. note:: This function is backend-compatible and will work on arrays
from all compatible backends.
Uses the algorithm proposed in [1]_
Parameters
----------
a : (ns,) array-like, float
Source histogram (uniform weight if empty list)
b : (nt,) array-like, float
Target histogram (uniform weight if empty list)
M : (ns,nt) array-like, float
Loss matrix (c-order array in numpy with type float64)
numItermax : int, optional (default=100000)
The maximum number of iterations before stopping the optimization
algorithm if it has not converged.
log: bool, optional (default=False)
If True, returns a dictionary containing the cost and dual variables.
Otherwise returns only the optimal transportation matrix.
center_dual: boolean, optional (default=True)
If True, centers the dual potential using function
:ref:`center_ot_dual`.
Returns
-------
gamma: array-like, shape (ns, nt)
Optimal transportation matrix for the given
parameters
log: dict, optional
If input log is true, a dictionary containing the
cost and dual variables and exit status
Examples
--------
Simple example with obvious solution. The function emd accepts lists and
perform automatic conversion to numpy arrays
>>> import ot
>>> a=[.5,.5]
>>> b=[.5,.5]
>>> M=[[0.,1.],[1.,0.]]
>>> ot.emd(a, b, M)
array([[0.5, 0. ],
[0. , 0.5]])
References
----------
.. [1] Bonneel, N., Van De Panne, M., Paris, S., & Heidrich, W. (2011,
December). Displacement interpolation using Lagrangian mass transport.
In ACM Transactions on Graphics (TOG) (Vol. 30, No. 6, p. 158). ACM.
See Also
--------
ot.bregman.sinkhorn : Entropic regularized OT ot.optim.cg : General
regularized OT"""
# convert to numpy if list
if type(a)==list:
a=np.array(a)
if type(b)==list:
b=np.array(b)
if type(M)==list:
M=np.array(M)
a0, b0, M0 = a, b, M
nx = get_backend(M0, a0, b0)
# convert to numpy
M = nx.to_numpy(M)
a = nx.to_numpy(a)
b = nx.to_numpy(b)
# ensure float64
a = np.asarray(a, dtype=np.float64)
b = np.asarray(b, dtype=np.float64)
M = np.asarray(M, dtype=np.float64, order='C')
# if empty array given then use uniform distributions
if len(a) == 0:
a = np.ones((M.shape[0],), dtype=np.float64) / M.shape[0]
if len(b) == 0:
b = np.ones((M.shape[1],), dtype=np.float64) / M.shape[1]
assert (a.shape[0] == M.shape[0] and b.shape[0] == M.shape[1]), \
"Dimension mismatch, check dimensions of M with a and b"
# ensure that same mass
np.testing.assert_almost_equal(a.sum(0),b.sum(0),err_msg='a and b vector must have the same sum')
b=b*a.sum()/b.sum()
asel = a != 0
bsel = b != 0
G, cost, u, v, result_code = emd_c(a, b, M, numItermax)
if center_dual:
u, v = center_ot_dual(u, v, a, b)
if np.any(~asel) or np.any(~bsel):
u, v = estimate_dual_null_weights(u, v, a, b, M)
result_code_string = check_result(result_code)
if log:
log = {}
log['cost'] = cost
log['u'] = nx.from_numpy(u, type_as=a0)
log['v'] = nx.from_numpy(v, type_as=b0)
log['warning'] = result_code_string
log['result_code'] = result_code
return nx.from_numpy(G, type_as=M0), log
return nx.from_numpy(G, type_as=M0)
|
40,597 | def mock_snakemake(rulename, **wildcards):
"""
This function is expected to be executed from the 'scripts'-directory of '
the snakemake project. It returns a snakemake.script.Snakemake object,
based on the Snakefile.
If a rule has wildcards, you have to specify them in **wildcards.
Parameters
----------
rulename: str
name of the rule for which the snakemake object should be generated
**wildcards:
keyword arguments fixing the wildcards. Only necessary if wildcards are
needed.
"""
import snakemake as sm
import os
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
from packaging.version import Version, parse
script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \
f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p):
snakefile = p
break
kwargs=dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
workflow.include(snakefile)
workflow.global_resources = {}
rule = workflow.get_rule(rulename)
dag = sm.dag.DAG(workflow, rules=[rule])
wc = Dict(wildcards)
job = sm.jobs.Job(rule, dag, wc)
def make_accessable(*ios):
for io in ios:
for i in range(len(io)):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
job.threads, job.resources, job.log,
job.dag.workflow.config, job.rule.name, None,)
# create log and output dir if not existent
for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True)
os.chdir(script_dir)
return snakemake
| def mock_snakemake(rulename, **wildcards):
"""
This function is expected to be executed from the 'scripts'-directory of '
the snakemake project. It returns a snakemake.script.Snakemake object,
based on the Snakefile.
If a rule has wildcards, you have to specify them in **wildcards.
Parameters
----------
rulename: str
name of the rule for which the snakemake object should be generated
**wildcards:
keyword arguments fixing the wildcards. Only necessary if wildcards are
needed.
"""
import snakemake as sm
import os
from pypsa.descriptors import Dict
from snakemake.script import Snakemake
from packaging.version import Version, parse
script_dir = Path(__file__).parent.resolve()
assert Path.cwd().resolve() == script_dir, \
f'mock_snakemake has to be run from the repository scripts directory {script_dir}'
os.chdir(script_dir.parent)
for p in sm.SNAKEFILE_CHOICES:
if os.path.exists(p):
snakefile = p
break
kwargs = dict(rerun_triggers=[]) if parse(sm.__version__) > Version("7.7.0") else {}
workflow = sm.Workflow(snakefile, overwrite_configfiles=[], **kwargs)
workflow.include(snakefile)
workflow.global_resources = {}
rule = workflow.get_rule(rulename)
dag = sm.dag.DAG(workflow, rules=[rule])
wc = Dict(wildcards)
job = sm.jobs.Job(rule, dag, wc)
def make_accessable(*ios):
for io in ios:
for i in range(len(io)):
io[i] = os.path.abspath(io[i])
make_accessable(job.input, job.output, job.log)
snakemake = Snakemake(job.input, job.output, job.params, job.wildcards,
job.threads, job.resources, job.log,
job.dag.workflow.config, job.rule.name, None,)
# create log and output dir if not existent
for path in list(snakemake.log) + list(snakemake.output):
Path(path).parent.mkdir(parents=True, exist_ok=True)
os.chdir(script_dir)
return snakemake
|
53,635 | def _subprocess_transform():
communicate = (bytes("string", "ascii"), bytes("string", "ascii"))
communicate_signature = "def communicate(self, input=None, timeout=None)"
args = """\
self, args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None,
universal_newlines=None, startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *, encoding=None, errors=None, text=None"""
py39_args = "user=None, group=None, extra_groups=None, umask=-1"
py310_args = "pipesize=-1"
py311_args = "process_group=None"
if PY39_PLUS:
args = ", ".join((args, py39_args))
if PY310_PLUS:
args = ", ".join((args, py310_args))
if PY311_PLUS:
args = ", ".join((args, py311_args))
init = f"""
def __init__({args}):
pass"""
wait_signature = "def wait(self, timeout=None)"
ctx_manager = """
def __enter__(self): return self
def __exit__(self, *args): pass
"""
py3_args = "args = []"
check_output_signature = """
check_output(
args, *,
stdin=None,
stderr=None,
shell=False,
cwd=None,
encoding=None,
errors=None,
universal_newlines=False,
timeout=None,
env=None,
text=None,
restore_signals=True,
preexec_fn=None,
pass_fds=(),
input=None,
bufsize=0,
executable=None,
close_fds=False,
startupinfo=None,
creationflags=0,
start_new_session=False
):
""".strip()
code = textwrap.dedent(
f"""
def {check_output_signature}
if universal_newlines:
return ""
return b""
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
{py3_args}
{communicate_signature}:
return {communicate!r}
{wait_signature}:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
{ctx_manager}
"""
)
if PY39_PLUS:
code += """
@classmethod
def __class_getitem__(cls, item):
pass
"""
init_lines = textwrap.dedent(init).splitlines()
indented_init = "\n".join(" " * 4 + line for line in init_lines)
code += indented_init
return parse(code)
| def _subprocess_transform():
communicate = (bytes("string", "ascii"), bytes("string", "ascii"))
communicate_signature = "def communicate(self, input=None, timeout=None)"
args = """\
self, args, bufsize=-1, executable=None, stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=True, shell=False, cwd=None, env=None,
universal_newlines=None, startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), *, encoding=None, errors=None, text=None"""
if PY39_PLUS:
args += ", user=None, group=None, extra_groups=None, umask=-1"
if PY310_PLUS:
args += ", pipesize=-1"
if PY311_PLUS:
args += ", process_group=None"
init = f"""
def __init__({args}):
pass"""
wait_signature = "def wait(self, timeout=None)"
ctx_manager = """
def __enter__(self): return self
def __exit__(self, *args): pass
"""
py3_args = "args = []"
check_output_signature = """
check_output(
args, *,
stdin=None,
stderr=None,
shell=False,
cwd=None,
encoding=None,
errors=None,
universal_newlines=False,
timeout=None,
env=None,
text=None,
restore_signals=True,
preexec_fn=None,
pass_fds=(),
input=None,
bufsize=0,
executable=None,
close_fds=False,
startupinfo=None,
creationflags=0,
start_new_session=False
):
""".strip()
code = textwrap.dedent(
f"""
def {check_output_signature}
if universal_newlines:
return ""
return b""
class Popen(object):
returncode = pid = 0
stdin = stdout = stderr = file()
{py3_args}
{communicate_signature}:
return {communicate!r}
{wait_signature}:
return self.returncode
def poll(self):
return self.returncode
def send_signal(self, signal):
pass
def terminate(self):
pass
def kill(self):
pass
{ctx_manager}
"""
)
if PY39_PLUS:
code += """
@classmethod
def __class_getitem__(cls, item):
pass
"""
init_lines = textwrap.dedent(init).splitlines()
indented_init = "\n".join(" " * 4 + line for line in init_lines)
code += indented_init
return parse(code)
|
212 | def check_if_updates_and_builds_set(ctx, param, value):
"""
Print an error to stderr if the user has set both the --updates and --builds flags.
Args:
ctx (click.core.Context): The Click context, used to find out if the other flags are set.
param (click.core.Option): The option being handled.
value (unicode): The value of the param flag.
Returns:
unicode: The value of the param flag.
"""
if value is not None and ((param.name == 'builds' and ctx.params.get('updates', False)) or \
(param.name == 'updates' and ctx.params.get('builds', False))):
raise click.BadOptionUsage('ERROR: must specify only one of --updates or --builds')
return value
| def check_if_updates_and_builds_set(ctx, param, value):
"""
Print an error to stderr if the user has set both the --updates and --builds flags.
Args:
ctx (click.core.Context): The Click context, used to find out if the other flags are set.
param (click.core.Option): The option being handled.
value (unicode): The value of the param flag.
Returns:
str: The value of the param flag.
"""
if value is not None and ((param.name == 'builds' and ctx.params.get('updates', False)) or \
(param.name == 'updates' and ctx.params.get('builds', False))):
raise click.BadOptionUsage('ERROR: must specify only one of --updates or --builds')
return value
|
34,346 | def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
scaffold_parser = subparsers.add_parser(
"init",
parents=parents,
help="Creates a new project, with example training data, actions, and config files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
scaffold_parser.add_argument(
"--no-prompt",
action="store_true",
help="Automatically choose default options for prompts and suppress warnings.",
)
scaffold_parser.add_argument(
"--init-dir", default=".", help="Descibe init folder path.",
)
scaffold_parser.set_defaults(func=run)
| def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
scaffold_parser = subparsers.add_parser(
"init",
parents=parents,
help="Creates a new project, with example training data, actions, and config files.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
scaffold_parser.add_argument(
"--no-prompt",
action="store_true",
help="Automatically choose default options for prompts and suppress warnings.",
)
scaffold_parser.add_argument(
"--init-dir", default=".", help="Directory where your project should be initialized.",
)
scaffold_parser.set_defaults(func=run)
|
31,670 | def fetch_incidents(last_run, first_run_time_range):
target_orgs = []
if BROKER:
orgs = list_org()
for org in orgs:
target_orgs.append(org['org_name'])
else:
target_orgs.append(None)
next_run = {}
incidents = []
for target_org in target_orgs:
if target_org:
last_fetch = last_run.get(f'{target_org}_last_fetch', None)
last_alert_id = last_run.get(f'{target_org}_last_alert_id', None)
else:
last_fetch = last_run.get('last_fetch', None)
last_alert_id = last_run.get('last_alert_id', None)
alert_time_max = datetime.utcnow()
if last_fetch is None:
alert_time_min = alert_time_max - timedelta(days=first_run_time_range)
else:
alert_time_min = dateparser.parse(last_fetch)
cov_alerts = list_alerts(target_org=target_org,
max_count=FETCH_LIMIT,
alert_time_min=alert_time_min.strftime(DATE_FORMAT),
alert_time_max=alert_time_max.strftime(DATE_FORMAT),
details='true')
latest_created_time = alert_time_min
for a in cov_alerts:
if a['id'] != last_alert_id:
created_time = datetime.utcfromtimestamp(a.get('createdTime', 0))
created_time_str = created_time.strftime(DATE_FORMAT)
if BROKER:
incident_name = f'''[{target_org}] [{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}'''
else:
incident_name = f'''[{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}'''
incident = {
'name': incident_name,
'occured': created_time_str,
'rawJSON': json.dumps(a)
}
if a.get('severity', None):
# XSOAR mapping
# Unknown: 0
# Informational: 0.5
# Low: 1
# Medium: 2
# High: 3
# Critical: 4
severity_from_portal = a['severity']
if severity_from_portal == 'Informational':
incident['severity'] = 0.5
elif severity_from_portal == 'Warning':
incident['severity'] = 1
elif severity_from_portal == 'Low':
incident['severity'] = 1
elif severity_from_portal == 'Medium':
incident['severity'] = 2
elif severity_from_portal == 'High':
incident['severity'] = 3
elif severity_from_portal == 'Critical':
incident['severity'] = 4
else:
incident['severity'] = 0
if a.get('analystDescription', None):
incident['details'] = a['analystDescription']
incidents.append(incident)
if created_time > latest_created_time:
latest_created_time = created_time
last_alert_id = a['id']
if BROKER:
next_run[f'{target_org}_last_fetch'] = latest_created_time.strftime(DATE_FORMAT)
next_run[f'{target_org}_last_alert_id'] = last_alert_id
else:
next_run['last_fetch'] = latest_created_time.strftime(DATE_FORMAT)
next_run['last_alert_id'] = last_alert_id
return next_run, incidents
| def fetch_incidents(last_run, first_run_time_range):
target_orgs = []
if BROKER:
orgs = list_org()
for org in orgs:
target_orgs.append(org['org_name'])
else:
target_orgs.append(None)
next_run = {}
incidents = []
for target_org in target_orgs:
if target_org:
last_fetch = last_run.get(f'{target_org}_last_fetch', None)
last_alert_id = last_run.get(f'{target_org}_last_alert_id', None)
else:
last_fetch = last_run.get('last_fetch', None)
last_alert_id = last_run.get('last_alert_id', None)
alert_time_max = datetime.utcnow()
if last_fetch is None:
alert_time_min = alert_time_max - timedelta(days=first_run_time_range)
else:
alert_time_min = dateparser.parse(last_fetch)
cov_alerts = list_alerts(target_org=target_org,
max_count=FETCH_LIMIT,
alert_time_min=alert_time_min.strftime(DATE_FORMAT),
alert_time_max=alert_time_max.strftime(DATE_FORMAT),
details='true')
latest_created_time = alert_time_min
for a in cov_alerts:
if a['id'] != last_alert_id:
created_time = datetime.utcfromtimestamp(a.get('createdTime', 0))
created_time_str = created_time.strftime(DATE_FORMAT)
if BROKER:
incident_name = f'''[{target_org}] [{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}'''
else:
incident_name = f'''[{a.get('type', 'No alert type')}] {a.get('analystTitle', 'No title')}'''
incident: Dict[str, Any] = {
'name': incident_name,
'occured': created_time_str,
'rawJSON': json.dumps(a)
}
if a.get('severity', None):
# XSOAR mapping
# Unknown: 0
# Informational: 0.5
# Low: 1
# Medium: 2
# High: 3
# Critical: 4
severity_from_portal = a['severity']
if severity_from_portal == 'Informational':
incident['severity'] = 0.5
elif severity_from_portal == 'Warning':
incident['severity'] = 1
elif severity_from_portal == 'Low':
incident['severity'] = 1
elif severity_from_portal == 'Medium':
incident['severity'] = 2
elif severity_from_portal == 'High':
incident['severity'] = 3
elif severity_from_portal == 'Critical':
incident['severity'] = 4
else:
incident['severity'] = 0
if a.get('analystDescription', None):
incident['details'] = a['analystDescription']
incidents.append(incident)
if created_time > latest_created_time:
latest_created_time = created_time
last_alert_id = a['id']
if BROKER:
next_run[f'{target_org}_last_fetch'] = latest_created_time.strftime(DATE_FORMAT)
next_run[f'{target_org}_last_alert_id'] = last_alert_id
else:
next_run['last_fetch'] = latest_created_time.strftime(DATE_FORMAT)
next_run['last_alert_id'] = last_alert_id
return next_run, incidents
|
43,704 | def observable(me_tables, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the many-body observable whose expectation value can be
measured in PennyLane.
This function can be used to build second-quantized operators in the basis
of single-particle states (e.g., HF states) and to transform them into
PennyLane observables. In general, the many-body observable :math:`\hat{O}` can combine
one-particle and two-particle operators as it is the case for electronic Hamiltonians
.. math::
\hat{O} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} + \hat{t}^{(2)}
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \hat{v}^{(2)} \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta.
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations this is the case for the kinetic energy operator,
the nuclei Coulomb potential or any other external fields included in the model Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
If an `active space <https://en.wikipedia.org/wiki/Complete_active_space>`_ is defined the
observable is expanded over the truncated basis of active orbitals. The contribution of
core orbitals, if any, can be passed to the function using the keyword argument ``init_term``.
The function utilizes tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to build the second-quantized operator and map it to basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is
converted to a a PennyLane observable by the function :func:`~.convert_observable`.
Args:
me_tables (list(array[float])): list containing the tables of matrix elements
of the operators :math:`\hat{t}` and :math:`\hat{v}`.
For single-particle operators the :math:`ith` array in the list will have shape
``(me_tables[i].shape[0], 3)`` with each row containing the indices
:math:`\alpha`, :math:`\beta` and the matrix element
:math:`\langle \alpha \vert \hat{t}^{(i)}\vert \beta \rangle`.
For two-particle operators the :math:`jth` array in the list
will have shape ``(me_tables[j].shape[0], 5)`` with each row containing
the indices :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\delta` and
the matrix element
:math:`\langle \alpha, \beta \vert \hat{v}^{(j)}\vert \gamma, \delta \rangle`.
init_term: the contribution of core orbitals, if any, or other quantity
required to initialize the many-body observable.
mapping (str): specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = np.array([[0., 0., 0.5], [1.0, 1.0, -0.5], [1.0, 0., 0.]])
>>> v = np.array([[ 0., 0., 0., 0., 0.25], [ 0., 1., 1., 0., -0.25], [ 1., 0., 0., 1., -0.5]])
>>> me_tables = []
>>> me_tables.append(t)
>>> me_tables.append(v)
>>> print(observable(me_tables, init_term=1/4, mapping="bravyi_kitaev"))
(0.0625) [I0]
+ (-0.0625) [Z0]
+ (0.4375) [Z0 Z1]
+ (-0.1875) [Z1]
>>> print(observable(me_tables, init_term=1/4, mapping="bravyi_kitaev", wires=['w0','w1']))
(0.0625) [Iw0]
+ (-0.0625) [Zw0]
+ (0.4375) [Zw0 Zw1]
+ (-0.1875) [Zw1]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
"The '{}' transformation is not available. \n "
"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'.".format(mapping)
)
sp_op_shape = (3,)
tp_op_shape = (5,)
for table in me_tables:
for row in table:
if np.array(row).shape not in (sp_op_shape, tp_op_shape):
raise ValueError(
"Expected entries of matrix element tables to be of shape (3,) or (5,); got {}".format(
np.array(row).shape
)
)
# Initialize the FermionOperator
mb_obs = FermionOperator() + FermionOperator("") * init_term
for table in me_tables:
for i in table:
if i.shape == (5,):
# two-particle operator
mb_obs += FermionOperator(
((int(i[0]), 1), (int(i[1]), 1), (int(i[2]), 0), (int(i[3]), 0)), i[4]
)
elif i.shape == (3,):
# single-particle operator
mb_obs += FermionOperator(((int(i[0]), 1), (int(i[1]), 0)), i[2])
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return structure.convert_observable(bravyi_kitaev(mb_obs), wires=wires)
return structure.convert_observable(jordan_wigner(mb_obs), wires=wires)
| def observable(me_tables, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the many-body observable whose expectation value can be
measured in PennyLane.
This function can be used to build second-quantized operators in the basis
of single-particle states (e.g., HF states) and to transform them into
PennyLane observables. In general, the many-body observable :math:`\hat{O}` can combine
one-particle and two-particle operators as it is the case for electronic Hamiltonians
.. math::
\hat{O} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} + \hat{t}^{(2)}
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \hat{v}^{(2)} \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta.
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations this is the case for the kinetic energy operator,
the nuclei Coulomb potential or any other external fields included in the model Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
If an `active space <https://en.wikipedia.org/wiki/Complete_active_space>`_ is defined the
observable is expanded over the truncated basis of active orbitals. The contribution of
core orbitals, if any, can be passed to the function using the keyword argument ``init_term``.
The function utilizes tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to build the second-quantized operator and map it to basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is
converted to a a PennyLane observable by the function :func:`~.convert_observable`.
Args:
me_tables (list(array[float])): list containing the tables of matrix elements
of the operators :math:`\hat{t}` and :math:`\hat{v}`.
For single-particle operators the :math:`i`-th array in the list will have shape
``(me_tables[i].shape[0], 3)`` with each row containing the indices
:math:`\alpha`, :math:`\beta` and the matrix element
:math:`\langle \alpha \vert \hat{t}^{(i)}\vert \beta \rangle`.
For two-particle operators the :math:`jth` array in the list
will have shape ``(me_tables[j].shape[0], 5)`` with each row containing
the indices :math:`\alpha`, :math:`\beta`, :math:`\gamma`, :math:`\delta` and
the matrix element
:math:`\langle \alpha, \beta \vert \hat{v}^{(j)}\vert \gamma, \delta \rangle`.
init_term: the contribution of core orbitals, if any, or other quantity
required to initialize the many-body observable.
mapping (str): specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = np.array([[0., 0., 0.5], [1.0, 1.0, -0.5], [1.0, 0., 0.]])
>>> v = np.array([[ 0., 0., 0., 0., 0.25], [ 0., 1., 1., 0., -0.25], [ 1., 0., 0., 1., -0.5]])
>>> me_tables = []
>>> me_tables.append(t)
>>> me_tables.append(v)
>>> print(observable(me_tables, init_term=1/4, mapping="bravyi_kitaev"))
(0.0625) [I0]
+ (-0.0625) [Z0]
+ (0.4375) [Z0 Z1]
+ (-0.1875) [Z1]
>>> print(observable(me_tables, init_term=1/4, mapping="bravyi_kitaev", wires=['w0','w1']))
(0.0625) [Iw0]
+ (-0.0625) [Zw0]
+ (0.4375) [Zw0 Zw1]
+ (-0.1875) [Zw1]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
"The '{}' transformation is not available. \n "
"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'.".format(mapping)
)
sp_op_shape = (3,)
tp_op_shape = (5,)
for table in me_tables:
for row in table:
if np.array(row).shape not in (sp_op_shape, tp_op_shape):
raise ValueError(
"Expected entries of matrix element tables to be of shape (3,) or (5,); got {}".format(
np.array(row).shape
)
)
# Initialize the FermionOperator
mb_obs = FermionOperator() + FermionOperator("") * init_term
for table in me_tables:
for i in table:
if i.shape == (5,):
# two-particle operator
mb_obs += FermionOperator(
((int(i[0]), 1), (int(i[1]), 1), (int(i[2]), 0), (int(i[3]), 0)), i[4]
)
elif i.shape == (3,):
# single-particle operator
mb_obs += FermionOperator(((int(i[0]), 1), (int(i[1]), 0)), i[2])
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return structure.convert_observable(bravyi_kitaev(mb_obs), wires=wires)
return structure.convert_observable(jordan_wigner(mb_obs), wires=wires)
|
50,147 | def _is_applicable(
module_details: ModuleDetails, cfg: dict, schema: Optional[dict] = None
) -> bool:
if not module_details.module.meta.get("skippable", False):
return True
normalized_name = f"cc_{module_details.name}".replace("-", "_")
config_keys = get_config_keys(normalized_name, schema=schema)
if not config_keys.intersection(cfg.keys()):
return False
return True
| def _is_applicable(
module_details: ModuleDetails, cfg: dict, schema: Optional[dict] = None
) -> bool:
if not module_details.module.meta.get("skip_on_inapplicable_schema", False):
return True
normalized_name = f"cc_{module_details.name}".replace("-", "_")
config_keys = get_config_keys(normalized_name, schema=schema)
if not config_keys.intersection(cfg.keys()):
return False
return True
|
17,896 | def get_variation_from_env(envvar_value):
"""Return a tuple with variation data from the specific environment
variable key.
Raise an exception if the passed in envvar doesn't look something
like: '0 100'
"""
try:
# We want to create a tuple of integers from a string containing
# integers. Anything else should throw.
rv = tuple(int(x) for x in envvar_value.strip().split(' '))
if (len(rv) is not 2):
raise Exception('The format is incorrect. Expected "{int} {int}"?')
except Exception as e:
print("Something went wrong: {0}".format(e))
raise e
return rv
| def get_variation_from_env(envvar_value):
"""Return a tuple with variation data from the specific environment
variable key.
Raise an exception if the passed in envvar doesn't look something
like: '0 100'
"""
try:
# We want to create a tuple of integers from a string containing
# integers. Anything else should throw.
rv = tuple(int(x) for x in envvar_value.strip().split(' '))
if (len(rv) == 2):
raise Exception('The format is incorrect. Expected "{int} {int}"?')
except Exception as e:
print("Something went wrong: {0}".format(e))
raise e
return rv
|
28,090 | def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
# We might pick up StationName in DockingRequested, make sure we clear it if leaving
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
# Only actually change URLs if we are current provider.
if config.get('system_provider') == 'eddb':
this.system_link['text'] = this.system
this.system_link['url'] = system_url(this.system) # Override standard URL function
this.system_link.update_idletasks()
# But only actually change the URL if we are current station provider.
if config.get('station_provider') == 'eddb':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station) # Override standard URL function
this.station_link.update_idletasks()
| def journal_entry(cmdr, is_beta, system, station, entry, state):
# Always update, even if we're not the *current* system or station provider.
this.system_address = entry.get('SystemAddress') or this.system_address
this.system = entry.get('StarSystem') or this.system
# We need pop == 0 to set the value so as to clear 'x' in systems with
# no stations.
pop = entry.get('Population')
if pop is not None:
this.system_population = pop
this.station = entry.get('StationName') or this.station
this.station_marketid = entry.get('MarketID') or this.station_marketid
this.station_marketid = entry.get('MarketID', this.station_marketid)
if entry['event'] in ('Undocked', 'FSDJump', 'SupercruiseEntry'):
this.station = None
this.station_marketid = None
# Only actually change URLs if we are current provider.
if config.get('system_provider') == 'eddb':
this.system_link['text'] = this.system
this.system_link['url'] = system_url(this.system) # Override standard URL function
this.system_link.update_idletasks()
# But only actually change the URL if we are current station provider.
if config.get('station_provider') == 'eddb':
this.station_link['text'] = this.station or (this.system_population and this.system_population > 0 and STATION_UNDOCKED or '')
this.station_link['url'] = station_url(this.system, this.station) # Override standard URL function
this.station_link.update_idletasks()
|
2,251 | def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
Y = np.array([[1, 0], [0, 1]])
warning_message = 'will be ignored'
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
Y = np.array([[1, 0, 0], [0, 1, 0]])
mlb = MultiLabelBinarizer(classes=[1, 2, 3])
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
| def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
Y = np.array([[1, 0], [0, 1]])
warning_message = 'unknown class.* will be ignored'
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
Y = np.array([[1, 0, 0], [0, 1, 0]])
mlb = MultiLabelBinarizer(classes=[1, 2, 3])
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
|
24,744 | def _has_different_parameter_names_only(original, overridden):
"""Check if the original and the overridden parameters have different names
but the same type.If they do, raise arguments-renamed. In any other case
return False.
Args:
original (astroid.FunctionDef): original function's definition.
overridden (astroid.FunctionDef): overridden function's definition.
Returns:
True or False: whether it is arguments-renamed or not.
"""
if len(original.args.args) != len(overridden.args.args):
return False
zipped_names = list(zip_longest(original.args.args, overridden.args.args))
zipped_types = zip_longest(original.args.annotations, overridden.args.annotations)
counter = 0
for original_type, overridden_type in zipped_types:
if str(original_type) == str(overridden_type): #type has not changed
if str(zipped_names[counter][0]) != str(zipped_names[counter][1]): #name has changed
return True
counter +=1
return False
| def _has_different_parameter_names_only(original: astroid.FunctionDef, overridden: astroid.FunctionDef) -> bool:
"""Check if the original and the overridden parameters have different names
but the same type.If they do, raise arguments-renamed. In any other case
return False.
Args:
original (astroid.FunctionDef): original function's definition.
overridden (astroid.FunctionDef): overridden function's definition.
Returns:
True or False: whether it is arguments-renamed or not.
"""
if len(original.args.args) != len(overridden.args.args):
return False
zipped_names = list(zip_longest(original.args.args, overridden.args.args))
zipped_types = zip_longest(original.args.annotations, overridden.args.annotations)
counter = 0
for original_type, overridden_type in zipped_types:
if str(original_type) == str(overridden_type): #type has not changed
if str(zipped_names[counter][0]) != str(zipped_names[counter][1]): #name has changed
return True
counter +=1
return False
|
49,660 | def get_terminal_width() -> int:
"""Get number of columns of the terminal."""
return shutil.get_terminal_size().columns - 1
| def get_terminal_width() -> int:
"""Return the width of the terminal window.
The width of the terminal connected to sys.__stdout__ is obtained via
os.get_terminal_size. If the terminal size cannot be determined, the
environment variable COLUMNS is used if defined. Otherwise, fallback value
is returned.
"""
try:
return os.get_terminal_size(sys.__stdout__.fileno()).columns - 1
except (AttributeError, ValueError, OSError):
# FALLBACK
return int(os.environ.get('COLUMNS', 80)) - 1
|
22,840 | def _modify_relative_data(relative_data, replace_spaces=False):
modified_relative_data = OrderedDict()
for key, value in relative_data.items():
for i, string in enumerate(value):
string = RELATIVE_PATTERN.sub(r'(\\d+)', string)
if replace_spaces:
string = re.sub(r'\s+', '\\s?', string)
value[i] = string
modified_relative_data[key] = value
return modified_relative_data
| def _modify_relative_data(relative_data, replace_spaces=False):
modified_relative_data = OrderedDict()
for key, value in relative_data.items():
for i, string in enumerate(value):
string = RELATIVE_PATTERN.sub(r'(\\d+)', string)
if replace_spaces:
string = re.sub(r'\s+', '\\s*', string)
value[i] = string
modified_relative_data[key] = value
return modified_relative_data
|
56,644 | def sortable_lcc_to_short_lcc(lcc):
"""
As close to the inverse of make_sortable_lcc as possible
:param basestring lcc:
:rtype: basestring
"""
m = LCC_PARTS_RE.match(lcc)
parts = m.groupdict()
parts['letters'] = parts['letters'].strip('-')
parts['number'] = parts['number'].strip('0').strip('.') # Need to do in order!
parts['cutter1'] = parts['cutter1'].strip(' ') if parts['cutter1'] else ''
parts['rest'] = ' ' + parts['rest'].strip() if parts['rest'] else ''
return '%(letters)s%(number)s%(cutter1)s%(rest)s' % parts
| def sortable_lcc_to_short_lcc(lcc):
"""
As close to the inverse of make_sortable_lcc as possible
:param basestring lcc:
:rtype: basestring
"""
m = LCC_PARTS_RE.match(lcc)
parts = m.groupdict()
parts['letters'] = parts['letters'].strip('-')
parts['number'] = parts['number'].strip('0').strip('.') # Need to do in order!
parts['cutter1'] = (parts.get('cutter1') or '').strip(' ')
parts['rest'] = ' ' + (parts.get('rest') or '').strip()
return '%(letters)s%(number)s%(cutter1)s%(rest)s' % parts
|
4,812 | def _get_config_or_cache_dir(xdg_base):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
configdir = Path(xdg_base, "matplotlib")
else:
configdir = Path.home() / ".matplotlib"
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
# If the config or cache directory cannot be created or is not a writable
# directory, create a temporary one.
tmpdir = os.environ["MPLCONFIGDIR"] = \
tempfile.mkdtemp(prefix="matplotlib-")
atexit.register(shutil.rmtree, tmpdir)
_log.warning(
"Matplotlib created a temporary config/cache directory at %s because "
"the default path (%s) is not a writable directory; it is recommended "
"to set the MPLCONFIGDIR environment variable to such a directory.",
configdir, tmpdir)
return tmpdir
| def _get_config_or_cache_dir(xdg_base):
configdir = os.environ.get('MPLCONFIGDIR')
if configdir:
configdir = Path(configdir).resolve()
elif sys.platform.startswith(('linux', 'freebsd')) and xdg_base:
configdir = Path(xdg_base, "matplotlib")
else:
configdir = Path.home() / ".matplotlib"
try:
configdir.mkdir(parents=True, exist_ok=True)
except OSError:
pass
else:
if os.access(str(configdir), os.W_OK) and configdir.is_dir():
return str(configdir)
# If the config or cache directory cannot be created or is not a writable
# directory, create a temporary one.
tmpdir = os.environ["MPLCONFIGDIR"] = \
tempfile.mkdtemp(prefix="matplotlib-")
atexit.register(shutil.rmtree, tmpdir)
_log.warning(
"Matplotlib created a temporary config/cache directory at %s because "
"the default path (%s) is not a writable directory; it is recommended "
"to set the MPLCONFIGDIR environment variable to a writable directory.",
configdir, tmpdir)
return tmpdir
|
53,472 | def iterator_suffix(iterator, stop: int):
for i, item in enumerate(iterator):
if i < stop: # [no-else-continue]
continue
else:
yield item
| def even_number_under(n: int):
for i in range(n):
if i%2 == 1: # [no-else-continue]
continue
else:
yield i
|
27,984 | def check(check_data):
"""
Invoke clang with an action which called by processes.
Different analyzer object belongs to for each build action.
skiplist handler is None if no skip file was configured.
"""
actions_map, action, context, analyzer_config, \
output_dir, skip_handler, quiet_output_on_stdout, \
capture_analysis_output, analysis_timeout, \
analyzer_environment, ctu_reanalyze_on_failure, \
output_dirs, statistics_data = check_data
failed_dir = output_dirs["failed"]
success_dir = output_dirs["success"]
try:
# If one analysis fails the check fails.
return_codes = 0
reanalyzed = False
result_file = ''
if analyzer_config is None:
raise Exception("Analyzer configuration is missing.")
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action, analyzer_config,
output_dir, context.severity_map,
skip_handler, statistics_data)
# The analyzer invocation calls __create_timeout as a callback
# when the analyzer starts. This callback creates the timeout
# watcher over the analyzer process, which in turn returns a
# function, that can later be used to check if the analyzer quit
# because we killed it due to a timeout.
#
# We need to capture the "function pointer" returned by
# setup_process_timeout as reference, so that we may call it
# later. To work around scoping issues, we use a list here so the
# "function pointer" is captured by reference.
timeout_cleanup = [lambda: False]
if analysis_timeout and analysis_timeout > 0:
def __create_timeout(analyzer_process):
"""
Once the analyzer process is started, this method is
called. Set up a timeout for the analysis.
"""
timeout_cleanup[0] = setup_process_timeout(
analyzer_process, analysis_timeout)
else:
def __create_timeout(analyzer_process):
# If no timeout is given by the client, this callback
# shouldn't do anything.
pass
result_file_exists = os.path.exists(rh.analyzer_result_file)
# Fills up the result handler with the analyzer information.
source_analyzer.analyze(analyzer_cmd, rh, analyzer_environment,
__create_timeout)
# If execution reaches this line, the analyzer process has quit.
if timeout_cleanup[0]():
LOG.warning("Analyzer ran too long, exceeding time limit "
"of %d seconds.", analysis_timeout)
LOG.warning("Considering this analysis as failed...")
rh.analyzer_returncode = -1
rh.analyzer_stderr = (">>> CodeChecker: Analysis timed out "
"after {0} seconds. <<<\n{1}") \
.format(analysis_timeout, rh.analyzer_stderr)
# If source file contains escaped spaces ("\ " tokens), then
# clangSA writes the plist file with removing this escape
# sequence, whereas clang-tidy does not. We rewrite the file
# names to contain no escape sequences for every analyzer.
result_file = rh.analyzer_result_file.replace(r'\ ', ' ')
result_base = os.path.basename(result_file)
ctu_active = is_ctu_active(source_analyzer)
ctu_suffix = '_CTU'
zip_suffix = ctu_suffix if ctu_active else ''
failure_type = "_unknown"
if rh.analyzer_returncode == 1:
failure_type = "_compile_error"
elif rh.analyzer_returncode == 254:
failure_type = "_crash"
zip_file = result_base + zip_suffix + failure_type + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
ctu_zip_file = result_base + ctu_suffix + failure_type + '.zip'
ctu_zip_file = os.path.join(failed_dir, ctu_zip_file)
return_codes = rh.analyzer_returncode
source_file_name = os.path.basename(action.source)
# Remove the previously generated error file.
if os.path.exists(zip_file):
os.remove(zip_file)
# Remove the previously generated CTU error file.
if os.path.exists(ctu_zip_file):
os.remove(ctu_zip_file)
if rh.analyzer_returncode == 0:
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s successfully.",
progress_checked_num.value, progress_actions.value,
action.analyzer_type, source_file_name)
if result_file_exists:
LOG.warning("Previous analysis results in '%s' has been "
"overwritten.", rh.analyzer_result_file)
if skip_handler:
# We need to check the plist content because skipping
# reports in headers can be done only this way.
plist_parser.skip_report_from_plist(result_file,
skip_handler)
else:
LOG.error("Analyzing %s with %s %s failed!",
source_file_name,
action.analyzer_type,
"CTU" if ctu_active else "")
if not quiet_output_on_stdout:
LOG.error("\n%s", rh.analyzer_stdout)
LOG.error("\n%s", rh.analyzer_stderr)
handle_failure(source_analyzer, rh, zip_file, result_base,
actions_map)
if ctu_active and ctu_reanalyze_on_failure:
LOG.error("Try to reanalyze without CTU")
# Try to reanalyze with CTU disabled.
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action,
analyzer_config,
output_dir,
context.severity_map,
skip_handler,
statistics_data,
True)
# Fills up the result handler with
# the analyzer information.
source_analyzer.analyze(analyzer_cmd,
rh,
analyzer_environment)
return_codes = rh.analyzer_returncode
if rh.analyzer_returncode == 0:
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s without"
" CTU successfully.",
progress_checked_num.value,
progress_actions.value,
action.analyzer_type,
source_file_name)
if result_file_exists:
LOG.warning("Previous analysis results in '%s' has "
"been overwritten.",
rh.analyzer_result_file)
else:
LOG.error("Analyzing '%s' with %s without CTU failed.",
source_file_name, action.analyzer_type)
zip_file = result_base + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
handle_failure(source_analyzer, rh, zip_file,
result_base, actions_map)
if not quiet_output_on_stdout:
if rh.analyzer_returncode:
LOG.error('\n%s', rh.analyzer_stdout)
LOG.error('\n%s', rh.analyzer_stderr)
else:
LOG.debug_analyzer('\n%s', rh.analyzer_stdout)
LOG.debug_analyzer('\n%s', rh.analyzer_stderr)
progress_checked_num.value += 1
return return_codes, False, reanalyzed, action.analyzer_type, \
result_file, action.source
except Exception as e:
LOG.debug_analyzer(str(e))
traceback.print_exc(file=sys.stdout)
return 1, False, reanalyzed, action.analyzer_type, None, \
action.source
| def check(check_data):
"""
Invoke clang with an action which called by processes.
Different analyzer object belongs to for each build action.
skiplist handler is None if no skip file was configured.
"""
actions_map, action, context, analyzer_config, \
output_dir, skip_handler, quiet_output_on_stdout, \
capture_analysis_output, analysis_timeout, \
analyzer_environment, ctu_reanalyze_on_failure, \
output_dirs, statistics_data = check_data
failed_dir = output_dirs["failed"]
success_dir = output_dirs["success"]
try:
# If one analysis fails the check fails.
return_codes = 0
reanalyzed = False
result_file = ''
if not analyzer_config:
raise Exception("Analyzer configuration is missing.")
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action, analyzer_config,
output_dir, context.severity_map,
skip_handler, statistics_data)
# The analyzer invocation calls __create_timeout as a callback
# when the analyzer starts. This callback creates the timeout
# watcher over the analyzer process, which in turn returns a
# function, that can later be used to check if the analyzer quit
# because we killed it due to a timeout.
#
# We need to capture the "function pointer" returned by
# setup_process_timeout as reference, so that we may call it
# later. To work around scoping issues, we use a list here so the
# "function pointer" is captured by reference.
timeout_cleanup = [lambda: False]
if analysis_timeout and analysis_timeout > 0:
def __create_timeout(analyzer_process):
"""
Once the analyzer process is started, this method is
called. Set up a timeout for the analysis.
"""
timeout_cleanup[0] = setup_process_timeout(
analyzer_process, analysis_timeout)
else:
def __create_timeout(analyzer_process):
# If no timeout is given by the client, this callback
# shouldn't do anything.
pass
result_file_exists = os.path.exists(rh.analyzer_result_file)
# Fills up the result handler with the analyzer information.
source_analyzer.analyze(analyzer_cmd, rh, analyzer_environment,
__create_timeout)
# If execution reaches this line, the analyzer process has quit.
if timeout_cleanup[0]():
LOG.warning("Analyzer ran too long, exceeding time limit "
"of %d seconds.", analysis_timeout)
LOG.warning("Considering this analysis as failed...")
rh.analyzer_returncode = -1
rh.analyzer_stderr = (">>> CodeChecker: Analysis timed out "
"after {0} seconds. <<<\n{1}") \
.format(analysis_timeout, rh.analyzer_stderr)
# If source file contains escaped spaces ("\ " tokens), then
# clangSA writes the plist file with removing this escape
# sequence, whereas clang-tidy does not. We rewrite the file
# names to contain no escape sequences for every analyzer.
result_file = rh.analyzer_result_file.replace(r'\ ', ' ')
result_base = os.path.basename(result_file)
ctu_active = is_ctu_active(source_analyzer)
ctu_suffix = '_CTU'
zip_suffix = ctu_suffix if ctu_active else ''
failure_type = "_unknown"
if rh.analyzer_returncode == 1:
failure_type = "_compile_error"
elif rh.analyzer_returncode == 254:
failure_type = "_crash"
zip_file = result_base + zip_suffix + failure_type + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
ctu_zip_file = result_base + ctu_suffix + failure_type + '.zip'
ctu_zip_file = os.path.join(failed_dir, ctu_zip_file)
return_codes = rh.analyzer_returncode
source_file_name = os.path.basename(action.source)
# Remove the previously generated error file.
if os.path.exists(zip_file):
os.remove(zip_file)
# Remove the previously generated CTU error file.
if os.path.exists(ctu_zip_file):
os.remove(ctu_zip_file)
if rh.analyzer_returncode == 0:
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s successfully.",
progress_checked_num.value, progress_actions.value,
action.analyzer_type, source_file_name)
if result_file_exists:
LOG.warning("Previous analysis results in '%s' has been "
"overwritten.", rh.analyzer_result_file)
if skip_handler:
# We need to check the plist content because skipping
# reports in headers can be done only this way.
plist_parser.skip_report_from_plist(result_file,
skip_handler)
else:
LOG.error("Analyzing %s with %s %s failed!",
source_file_name,
action.analyzer_type,
"CTU" if ctu_active else "")
if not quiet_output_on_stdout:
LOG.error("\n%s", rh.analyzer_stdout)
LOG.error("\n%s", rh.analyzer_stderr)
handle_failure(source_analyzer, rh, zip_file, result_base,
actions_map)
if ctu_active and ctu_reanalyze_on_failure:
LOG.error("Try to reanalyze without CTU")
# Try to reanalyze with CTU disabled.
source_analyzer, analyzer_cmd, rh, reanalyzed = \
prepare_check(action,
analyzer_config,
output_dir,
context.severity_map,
skip_handler,
statistics_data,
True)
# Fills up the result handler with
# the analyzer information.
source_analyzer.analyze(analyzer_cmd,
rh,
analyzer_environment)
return_codes = rh.analyzer_returncode
if rh.analyzer_returncode == 0:
handle_success(rh, result_file, result_base,
skip_handler, capture_analysis_output,
success_dir)
LOG.info("[%d/%d] %s analyzed %s without"
" CTU successfully.",
progress_checked_num.value,
progress_actions.value,
action.analyzer_type,
source_file_name)
if result_file_exists:
LOG.warning("Previous analysis results in '%s' has "
"been overwritten.",
rh.analyzer_result_file)
else:
LOG.error("Analyzing '%s' with %s without CTU failed.",
source_file_name, action.analyzer_type)
zip_file = result_base + '.zip'
zip_file = os.path.join(failed_dir, zip_file)
handle_failure(source_analyzer, rh, zip_file,
result_base, actions_map)
if not quiet_output_on_stdout:
if rh.analyzer_returncode:
LOG.error('\n%s', rh.analyzer_stdout)
LOG.error('\n%s', rh.analyzer_stderr)
else:
LOG.debug_analyzer('\n%s', rh.analyzer_stdout)
LOG.debug_analyzer('\n%s', rh.analyzer_stderr)
progress_checked_num.value += 1
return return_codes, False, reanalyzed, action.analyzer_type, \
result_file, action.source
except Exception as e:
LOG.debug_analyzer(str(e))
traceback.print_exc(file=sys.stdout)
return 1, False, reanalyzed, action.analyzer_type, None, \
action.source
|
11,794 | def export_regions(objs):
"""
Convenience function to convert a sequence of Ginga canvas objects
to a ds9 file containing regions and
return a list of matching .
Parameters
----------
objs : seq of subclasses of `~ginga.canvas.CanvasObject.CanvasObjectBase`
Sequence of Ginga canvas objects compatible with Regions
Returns
-------
regions : `~regions.Regions` object
Returns an astropy regions Regions object
"""
regs = regions.Regions(map(ginga_canvas_object_to_astropy_region, objs))
return regs
| def export_regions(objs):
"""
Convenience function to convert a sequence of Ginga canvas objects
to a ds9 file containing regions and
return a list of matching regions.
Parameters
----------
objs : seq of subclasses of `~ginga.canvas.CanvasObject.CanvasObjectBase`
Sequence of Ginga canvas objects compatible with Regions
Returns
-------
regions : `~regions.Regions` object
Returns an astropy regions Regions object
"""
regs = regions.Regions(map(ginga_canvas_object_to_astropy_region, objs))
return regs
|
40,128 | def log_current_packages(packages: Tuple[str], install: bool = True):
'''
Log which packages are installed or removed.
:param packages: List of packages that are affected.
:param install: Identifier to distinguish installation from removal.
'''
action = 'Installing' if install else 'Removing'
logging.info(f"{action} {' '.join(packages)}")
| def log_current_packages(packages: Tuple[str], install: bool = True):
'''
Log which packages are installed or removed.
:param packages: List of packages that are affected.
:param install: Identifier to distinguish installation from removal.
'''
logging.info(f'{action} {" ".join(packages)}')
logging.info(f"{action} {' '.join(packages)}")
|
53,863 | def process_alias(io, data, name, print_name=False):
return process_rr(io, data, 'ALIAS', 'target-resource-id', name, print_name) | def process_alias(io, data, name, print_name=False):
return process_rr(io, data, 'ALIAS', 'target-resource-id', name, print_name)
|
12,493 | def is_definitely_not_enum_member(name: str, typ: Optional[Type]) -> bool:
"""
Return `True` if we are certain that an object inside an enum class statement
will not be converted to become a member of the enum.
The following things are not converted:
1. Assignments with private names like in `__prop = 1`
2. Assignments with dunder names like `__hash__ = some_hasher`
3. Assignments with sunder names like `_order_ = 'a, b, c'`
4. Assignments to methods/descriptors like in `method = classmethod(func)`
"""
if is_private(name) or is_dunder(name) or is_sunder(name):
return True
if typ is None:
return False
proper_type = get_proper_type(typ)
return isinstance(proper_type, FunctionLike) or (
isinstance(proper_type, Instance) and proper_type.type.is_descriptor
)
| def is_definitely_not_enum_member(name: str, typ: Type | None) -> bool:
"""
Return `True` if we are certain that an object inside an enum class statement
will not be converted to become a member of the enum.
The following things are not converted:
1. Assignments with private names like in `__prop = 1`
2. Assignments with dunder names like `__hash__ = some_hasher`
3. Assignments with sunder names like `_order_ = 'a, b, c'`
4. Assignments to methods/descriptors like in `method = classmethod(func)`
"""
if is_private(name) or is_dunder(name) or is_sunder(name):
return True
if typ is None:
return False
proper_type = get_proper_type(typ)
return isinstance(proper_type, FunctionLike) or (
isinstance(proper_type, Instance) and proper_type.type.is_descriptor
)
|
45,870 | def tiltProjection(taux: torch.Tensor, tauy: torch.Tensor, inv: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux (torch.Tensor): Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
inv (bool): False to obtain the the tilt projection matrix. False for the inverse matrix
Returns:
torch.Tensor: Inverse tilt projection matrix with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim = taux.dim()
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if inv:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
| def tiltProjection(taux: torch.Tensor, tauy: torch.Tensor, inv: bool = False) -> torch.Tensor:
r"""Estimate the tilt projection matrix or the inverse tilt projection matrix
Args:
taux (torch.Tensor): Rotation angle in radians around the :math:`x`-axis with shape :math:`(*, 1)`.
tauy (torch.Tensor): Rotation angle in radians around the :math:`y`-axis with shape :math:`(*, 1)`.
inv (bool): False to obtain the the tilt projection matrix. False for the inverse matrix
Returns:
torch.Tensor: Inverse tilt projection matrix with shape :math:`(*, 3, 3)`.
"""
assert taux.dim() == tauy.dim()
assert taux.numel() == tauy.numel()
ndim: int = len(taux.shape)
taux = taux.reshape(-1)
tauy = tauy.reshape(-1)
cTx = torch.cos(taux)
sTx = torch.sin(taux)
cTy = torch.cos(tauy)
sTy = torch.sin(tauy)
zero = torch.zeros_like(cTx)
one = torch.ones_like(cTx)
Rx = torch.stack([one, zero, zero, zero, cTx, sTx, zero, -sTx, cTx], -1).reshape(-1, 3, 3)
Ry = torch.stack([cTy, zero, -sTy, zero, one, zero, sTy, zero, cTy], -1).reshape(-1, 3, 3)
R = Ry @ Rx
if inv:
invR22 = 1 / R[..., 2, 2]
invPz = torch.stack(
[invR22, zero, R[..., 0, 2] * invR22,
zero, invR22, R[..., 1, 2] * invR22,
zero, zero, one], -1
).reshape(-1, 3, 3)
invTilt = R.transpose(-1, -2) @ invPz
if ndim == 0:
invTilt = torch.squeeze(invTilt)
return invTilt
else:
Pz = torch.stack(
[R[..., 2, 2], zero, -R[..., 0, 2],
zero, R[..., 2, 2], -R[..., 1, 2],
zero, zero, one], -1
).reshape(-1, 3, 3)
tilt = Pz @ R.transpose(-1, -2)
if ndim == 0:
tilt = torch.squeeze(tilt)
return tilt
|
31,092 | def check_if_incident_was_modified_in_xdr(incident_id):
demisto_incident = demisto.get_incidents()[0]
last_mirrored_in_time = demisto_incident.get('CustomFields', {}).get('lastmirroredintime')
last_mirrored_in_time_timestamp = arg_to_timestamp(last_mirrored_in_time, 'last_mirrored_in_time')
last_modified_incidents_dict = get_integration_context().get('modified_incidents', {})
if incident_id in last_modified_incidents_dict: # search the incident in the dict of modified incidents
incident_modification_time_in_xdr = int(str(last_modified_incidents_dict[incident_id]))
demisto.info(f"XDR incident {incident_id}\n"
f"modified time: {incident_modification_time_in_xdr}\n"
f"last mirrored in time: {last_mirrored_in_time_timestamp}")
if incident_modification_time_in_xdr > last_mirrored_in_time_timestamp: # need to update this incident
demisto.info(f"Incident '{incident_id}' was modified. performing extra-data request.")
return True
else: # the incident was not modified
return False
| def check_if_incident_was_modified_in_xdr(incident_id):
demisto_incident = demisto.get_incidents()[0]
last_mirrored_in_time = demisto_incident.get('CustomFields', {}).get('lastmirroredintime')
last_mirrored_in_time_timestamp = arg_to_timestamp(last_mirrored_in_time, 'last_mirrored_in_time')
last_modified_incidents_dict = get_integration_context().get('modified_incidents', {})
if incident_id in last_modified_incidents_dict: # search the incident in the dict of modified incidents
incident_modification_time_in_xdr = int(str(last_modified_incidents_dict[incident_id]))
demisto.debug(f"XDR incident {incident_id}\n"
f"modified time: {incident_modification_time_in_xdr}\n"
f"last mirrored in time: {last_mirrored_in_time_timestamp}")
if incident_modification_time_in_xdr > last_mirrored_in_time_timestamp: # need to update this incident
demisto.info(f"Incident '{incident_id}' was modified. performing extra-data request.")
return True
else: # the incident was not modified
return False
|
58,207 | def patched_api_call(original_func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return original_func(*args, **kwargs)
endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix')
with pin.tracer.trace('{}.command'.format(endpoint_name),
service='{}.{}'.format(pin.service, endpoint_name),
span_type=SpanTypes.HTTP) as span:
span.set_tag(SPAN_MEASURED_KEY)
operation = None
if args:
operation = args[0]
span.resource = '%s.%s' % (endpoint_name, operation.lower())
else:
span.resource = endpoint_name
aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)
region_name = deep_getattr(instance, 'meta.region_name')
meta = {
'aws.agent': 'botocore',
'aws.operation': operation,
'aws.region': region_name,
}
span.set_tags(meta)
result = original_func(*args, **kwargs)
response_meta = result['ResponseMetadata']
http_status_code = response_meta.get('HTTPStatusCode')
if http_status_code:
span.set_tag(http.STATUS_CODE, http_status_code)
retry_attempts = response_meta.get('RetryAttempts')
if retry_attempts:
span.set_tag('retry_attempts', retry_attempts)
request_id = response_meta.get('RequestId')
if request_id:
span.set_tag('aws.requestid', request_id)
# set analytics sample rate
span.set_tag(
ANALYTICS_SAMPLE_RATE_KEY,
config.botocore.get_analytics_sample_rate()
)
return result
| def patched_api_call(original_func, instance, args, kwargs):
pin = Pin.get_from(instance)
if not pin or not pin.enabled():
return original_func(*args, **kwargs)
endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix')
with pin.tracer.trace('{}.command'.format(endpoint_name),
service='{}.{}'.format(pin.service, endpoint_name),
span_type=SpanTypes.HTTP) as span:
span.set_tag(SPAN_MEASURED_KEY)
operation = None
if args:
operation = args[0]
span.resource = '%s.%s' % (endpoint_name, operation.lower())
else:
span.resource = endpoint_name
aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)
region_name = deep_getattr(instance, 'meta.region_name')
meta = {
'aws.agent': 'botocore',
'aws.operation': operation,
'aws.region': region_name,
}
span.set_tags(meta)
result = original_func(*args, **kwargs)
response_meta = result['ResponseMetadata']
http_status_code = response_meta.get('HTTPStatusCode')
if http_status_code:
span.set_tag(http.STATUS_CODE, http_status_code)
retry_attempts = response_meta.get('RetryAttempts')
if 'RetryAttempts' in response_meta:
span.set_tag('retry_attempts', response_meta['RetryAttempts'])
span.set_tag('retry_attempts', retry_attempts)
request_id = response_meta.get('RequestId')
if request_id:
span.set_tag('aws.requestid', request_id)
# set analytics sample rate
span.set_tag(
ANALYTICS_SAMPLE_RATE_KEY,
config.botocore.get_analytics_sample_rate()
)
return result
|
48,018 | def get_model_class(name):
for cls in get_all_subclasses(Model):
if cls.__name__.lower() == name:
return cls
raise ValueError("There is not model class with this name: {}".format(name))
| def get_model_class(name):
for cls in get_all_subclasses(Model):
if cls.__name__.lower() == name:
return cls
raise ValueError("There is no model class with this name: {}".format(name))
|
47,392 | def dice_loss(inputs: Tensor, targets: Tensor, num_masks: float) -> Tensor:
r"""
Compute the DICE loss, similar to generalized IOU for masks as follow
.. math::
\mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x \cap y }{x \cup y + 1}}
In practice, since `targets` is a binary mask, (only 0s and 1s), dice can be computed as follow
.. math::
\mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x * y }{x + y + 1}}
Args:
inputs (Tensor): A tensor representing a mask
targets (Tensor): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class).
Returns:
Tensor: The computed loss
"""
probs: Tensor = inputs.sigmoid().flatten(1)
numerator: Tensor = 2 * (probs * targets).sum(-1)
denominator: Tensor = probs.sum(-1) + targets.sum(-1)
loss: Tensor = 1 - (numerator + 1) / (denominator + 1)
loss = loss.sum() / num_masks
return loss
| def dice_loss(inputs: Tensor, targets: Tensor, num_masks: float) -> Tensor:
r"""
Compute the DICE loss, similar to generalized IOU for masks as follow
.. math::
\mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x \cap y }{x \cup y + 1}}
In practice, since `targets` is a binary mask, (only 0s and 1s), dice can be computed as follow
.. math::
\mathcal{L}_{\text{dice}(x, y) = 1 - \frac{2 * x * y }{x + y + 1}}
Args:
inputs (`torch.Tensor`): A tensor representing a mask
targets (Tensor): A tensor with the same shape as inputs. Stores the binary classification labels for each element in inputs (0 for the negative class and 1 for the positive class).
Returns:
Tensor: The computed loss
"""
probs: Tensor = inputs.sigmoid().flatten(1)
numerator: Tensor = 2 * (probs * targets).sum(-1)
denominator: Tensor = probs.sum(-1) + targets.sum(-1)
loss: Tensor = 1 - (numerator + 1) / (denominator + 1)
loss = loss.sum() / num_masks
return loss
|
42,821 | def move_git_repo(source_path, new_path):
"""
Moves git folder and .gitignore to the new backup directory.
"""
if os.path.exists(os.path.join(new_path, '.git')) or os.path.exists(os.path.join(new_path, '.gitignore')):
print_red_bold("Git repository already exists new path ({})".format(new_path))
print_red_bold("Please choose a different directory")
sys.exit()
git_dir = os.path.join(source_path, '.git')
git_ignore_file = os.path.join(source_path, '.gitignore')
try:
move(git_dir, new_path)
move(git_ignore_file, new_path)
print_blue_bold("Moving git repo to new location.")
except FileNotFoundError:
pass
| def move_git_repo(source_path, new_path):
"""
Moves git folder and .gitignore to the new backup directory.
"""
if os.path.exists(os.path.join(new_path, '.git')) or os.path.exists(os.path.join(new_path, '.gitignore')):
print_red_bold("A git repo already exists here: {}".format(new_path))
print_red_bold("Please choose a different directory")
sys.exit()
git_dir = os.path.join(source_path, '.git')
git_ignore_file = os.path.join(source_path, '.gitignore')
try:
move(git_dir, new_path)
move(git_ignore_file, new_path)
print_blue_bold("Moving git repo to new location.")
except FileNotFoundError:
pass
|
59,192 | def _run_pip(args, additional_paths=None):
# Add our bundled software to the sys.path so we can import it
if additional_paths is not None:
sys.path = additional_paths + sys.path
# Invoke pip as if it's the main module, and catch the exit.
backup_argv = sys.argv[:]
sys.argv[1:] = args
try:
runpy.run_module("pip", run_name="__main__", alter_sys=True)
except SystemExit as e:
return e.code
finally:
sys.argv[:] = backup_argv
raise SystemError("pip have not exited, that should never happen")
| def _run_pip(args, additional_paths=None):
# Add our bundled software to the sys.path so we can import it
if additional_paths is not None:
sys.path = additional_paths + sys.path
# Invoke pip as if it's the main module, and catch the exit.
backup_argv = sys.argv[:]
sys.argv[1:] = args
try:
runpy.run_module("pip", run_name="__main__", alter_sys=True)
except SystemExit as exc:
return exc.code
finally:
sys.argv[:] = backup_argv
raise SystemError("pip have not exited, that should never happen")
|
41,205 | def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A as 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A as 1 by swapping gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the orde: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
| def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A as 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A as 1 by swapping gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the order: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
|
44,601 | def urisfy_regex_or_tree(
unit: Any, is_ascii: bool, is_wide: bool, is_nocase: bool,
) -> Optional[UrsaExpression]:
or_strings = flatten_regex_or_tree(unit)
if or_strings and all(s is not None for s in or_strings):
or_ursa_strings: List[UrsaExpression] = []
for s in or_strings:
ursified_s = ursify_regex_text(
s, is_ascii=is_ascii, is_wide=is_wide, is_nocase=is_nocase
)
if ursified_s:
or_ursa_strings.append(ursified_s)
if or_ursa_strings:
return UrsaExpression.or_(*or_ursa_strings)
else:
return None
else:
return None
| def urisfy_regex_or_tree(
unit: Any, is_ascii: bool, is_wide: bool, is_nocase: bool,
) -> Optional[UrsaExpression]:
or_strings = flatten_regex_or_tree(unit)
if or_strings and all(or_strings):
or_ursa_strings: List[UrsaExpression] = []
for s in or_strings:
ursified_s = ursify_regex_text(
s, is_ascii=is_ascii, is_wide=is_wide, is_nocase=is_nocase
)
if ursified_s:
or_ursa_strings.append(ursified_s)
if or_ursa_strings:
return UrsaExpression.or_(*or_ursa_strings)
else:
return None
else:
return None
|
57,898 | def main() -> None:
params = {k: v for k, v in demisto.params().items() if v is not None}
params['indicator_type'] = FeedIndicatorType.File
params['feed_name_to_config'] = {
'File': {
'url': params.get("url") + "/api/v1/",
'extractor': "data",
'indicator': 'sha256_hash',
'indicator_type': FeedIndicatorType.File,
'relation_name': EntityRelationship.Relationships.INDICATOR_OF,
'reverse_relationship_name': EntityRelationship.Relationships.INDICATED_BY,
'relation_entity_b': 'signature',
'relation_entity_b_type': 'Malware',
'create_relations_function': custom_build_relationships,
'mapping': {
'sha256_hash': 'sha256',
'sha1_hash': 'sha1',
'md5_hash': 'md5',
'first_seen': 'firstseenbyfeed',
'last_seen': 'lastseenbyfeed',
'file_name': 'Associated File Names',
'file_size': 'size',
'file_type': 'filetype',
'reporter': 'reported_by',
'signature': 'malware_family',
'imphash': 'imphash',
'ssdeep': 'ssdeep',
'tags': 'tags'
}
}
}
params['data'] = {'query': 'get_recent',
'selector': 'time'}
feed_main(params, 'MalwareBazaar Feed', 'malwarebazzar')
| def main() -> None:
params = {k: v for k, v in demisto.params().items() if v is not None}
params['indicator_type'] = FeedIndicatorType.File
params['feed_name_to_config'] = {
'File': {
'url': params.get("url") + "/api/v1/",
'extractor': "data",
'indicator': 'sha256_hash',
'indicator_type': FeedIndicatorType.File,
'relation_name': EntityRelationship.Relationships.INDICATOR_OF,
'reverse_relationship_name': EntityRelationship.Relationships.INDICATED_BY,
'relation_entity_b': 'signature',
'relation_entity_b_type': 'Malware',
'create_relations_function': custom_build_relationships,
'mapping': {
'sha256_hash': 'sha256',
'sha1_hash': 'sha1',
'md5_hash': 'md5',
'first_seen': 'firstseenbyfeed',
'last_seen': 'lastseenbyfeed',
'file_name': 'Associated File Names',
'file_size': 'size',
'file_type': 'filetype',
'reporter': 'reported_by',
'signature': 'malware_family',
'imphash': 'imphash',
'ssdeep': 'ssdeep',
'tags': 'tags'
}
}
}
params['data'] = {'query': 'get_recent',
'selector': 'time'} # query params to get only the recent changes for the incremental feed
feed_main(params, 'MalwareBazaar Feed', 'malwarebazzar')
|
1,103 | def test_output_version():
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
obj = DerivedInterface1()
assert obj._check_version_requirements(obj._outputs()) == []
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
obj = DerivedInterface1()
assert obj._check_version_requirements(obj._outputs()) == ['foo']
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
def _run_interface(self, runtime):
return runtime
def _list_outputs(self):
return {'foo': 1}
obj = DerivedInterface1()
with pytest.raises(TypeError):
obj.run()
| def test_output_version():
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.9')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
obj = DerivedInterface1()
assert obj._check_version_requirements(obj._outputs()) == []
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
obj = DerivedInterface1()
assert obj._check_version_requirements(obj._outputs()) == ['foo']
class InputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int')
class OutputSpec(nib.TraitedSpec):
foo = nib.traits.Int(desc='a random int', min_ver='0.11')
class DerivedInterface1(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
_version = '0.10'
resource_monitor = False
def _run_interface(self, runtime):
return runtime
def _list_outputs(self):
return {'foo': 1}
obj = DerivedInterface1()
with pytest.raises(KeyError):
obj.run()
|
12,385 | def remove_default_ca_certs(distro_name):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
distro_cfg = _distro_ca_certs_configs(distro_name)
util.delete_dir_contents(distro_cfg['ca_cert_path'])
util.delete_dir_contents(distro_cfg['ca_cert_system_path'])
util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644)
if distro_name == 'debian' or distro_name == 'ubuntu':
debconf_sel = (
"ca-certificates ca-certificates/trust_new_crts " + "select no")
subp.subp(('debconf-set-selections', '-'), debconf_sel)
| def remove_default_ca_certs(distro_name):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
distro_cfg = _distro_ca_certs_configs(distro_name)
util.delete_dir_contents(distro_cfg['ca_cert_path'])
util.delete_dir_contents(distro_cfg['ca_cert_system_path'])
util.write_file(distro_cfg['ca_cert_config'], "", mode=0o644)
if distro_name in ['debian', 'ubuntu']:
debconf_sel = (
"ca-certificates ca-certificates/trust_new_crts " + "select no")
subp.subp(('debconf-set-selections', '-'), debconf_sel)
|
8,506 | def user_dictize(
user, context, include_password_hash=False,
include_plugin_extras=False):
if context.get('with_capacity'):
user, capacity = user
result_dict = d.table_dictize(user, context, capacity=capacity)
else:
result_dict = d.table_dictize(user, context)
password_hash = result_dict.pop('password')
del result_dict['reset_key']
result_dict['display_name'] = user.display_name
result_dict['email_hash'] = user.email_hash
result_dict['number_created_packages'] = user.number_created_packages(
include_private_and_draft=context.get(
'count_private_and_draft_datasets', False))
requester = context.get('user')
reset_key = result_dict.pop('reset_key', None)
apikey = result_dict.pop('apikey', None)
email = result_dict.pop('email', None)
plugin_extras = result_dict.pop('plugin_extras', None)
if context.get('keep_email', False):
result_dict['email'] = email
if context.get('keep_apikey', False):
result_dict['apikey'] = apikey
if requester == user.name:
result_dict['apikey'] = apikey
result_dict['email'] = email
if authz.is_sysadmin(requester):
result_dict['apikey'] = apikey
result_dict['email'] = email
if include_password_hash:
result_dict['password_hash'] = password_hash
if include_plugin_extras:
result_dict['plugin_extras'] = copy.deepcopy(
plugin_extras) if plugin_extras else plugin_extras
model = context['model']
session = model.Session
image_url = result_dict.get('image_url')
result_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
#munge here should not have an effect only doing it incase
#of potential vulnerability of dodgy api input
image_url = munge.munge_filename_legacy(image_url)
result_dict['image_display_url'] = h.url_for_static(
'uploads/user/%s' % result_dict.get('image_url'),
qualified=True
)
return result_dict
| def user_dictize(
user, context, include_password_hash=False,
include_plugin_extras=False):
if context.get('with_capacity'):
user, capacity = user
result_dict = d.table_dictize(user, context, capacity=capacity)
else:
result_dict = d.table_dictize(user, context)
password_hash = result_dict.pop('password')
del result_dict['reset_key']
result_dict['display_name'] = user.display_name
result_dict['email_hash'] = user.email_hash
result_dict['number_created_packages'] = user.number_created_packages(
include_private_and_draft=context.get(
'count_private_and_draft_datasets', False))
requester = context.get('user')
reset_key = result_dict.pop('reset_key', None)
apikey = result_dict.pop('apikey', None)
email = result_dict.pop('email', None)
plugin_extras = result_dict.pop('plugin_extras', None)
if context.get('keep_email', False):
result_dict['email'] = email
if context.get('keep_apikey', False):
result_dict['apikey'] = apikey
if requester == user.name:
result_dict['apikey'] = apikey
result_dict['email'] = email
if authz.is_sysadmin(requester):
result_dict['apikey'] = apikey
result_dict['email'] = email
if include_password_hash:
result_dict['password_hash'] = password_hash
if include_plugin_extras:
result_dict['plugin_extras'] = copy.deepcopy(
plugin_extras) if plugin_extras else plugin_extras
model = context['model']
session = model.Session
image_url = result_dict.get('image_url')
result_dict['image_display_url'] = image_url
if image_url and not image_url.startswith('http'):
#munge here should not have an effect only doing it incase
# of potential vulnerability of dodgy api input.
image_url = munge.munge_filename_legacy(image_url)
result_dict['image_display_url'] = h.url_for_static(
'uploads/user/%s' % result_dict.get('image_url'),
qualified=True
)
return result_dict
|
56,340 | def _compute_dt_correlations(catalog, master, min_link, event_id_mapper,
stream_dict, min_cc, extract_len, pre_pick,
shift_len, interpolate, max_workers=1):
""" Compute cross-correlation delay times. """
max_workers = max_workers or 1
Logger.info(
f"Correlating {master.resource_id.id} with {len(catalog)} events")
differential_times_dict = dict()
master_stream = _prepare_stream(
stream=stream_dict[master.resource_id.id], event=master,
extract_len=extract_len, pre_pick=pre_pick)
available_seed_ids = {tr.id for st in master_stream.values() for tr in st}
Logger.info(f"The channels provided are: {available_seed_ids}")
master_seed_ids = {
SeedPickID(pick.waveform_id.get_seed_string(), pick.phase_hint[0])
for pick in master.picks if
pick.phase_hint[0] in "PS" and
pick.waveform_id.get_seed_string() in available_seed_ids}
Logger.info(f"Using channels: {master_seed_ids}")
# Dictionary of travel-times for master keyed by {station}_{phase_hint}
master_tts = dict()
master_origin_time = (master.preferred_origin() or master.origins[0]).time
for pick in master.picks:
if pick.phase_hint[0] not in "PS":
continue
tt1 = pick.time - master_origin_time
master_tts.update({
"{0}_{1}".format(
pick.waveform_id.station_code, pick.phase_hint[0]): tt1})
matched_length = extract_len + (2 * shift_len)
matched_pre_pick = pre_pick + shift_len
# We will use this to maintain order
event_dict = {event.resource_id.id: event for event in catalog}
event_ids = list(event_dict.keys())
matched_streams = {
event_id: _prepare_stream(
stream=stream_dict[event_id], event=event_dict[event_id],
extract_len=matched_length, pre_pick=matched_pre_pick,
seed_pick_ids=master_seed_ids)
for event_id in event_ids}
sampling_rates = {tr.stats.sampling_rate for st in master_stream.values()
for tr in st}
for phase_hint in master_stream.keys(): # Loop over P and S separately
for sampling_rate in sampling_rates: # Loop over separate samp rates
delta = 1.0 / sampling_rate
_master_stream = master_stream[phase_hint].select(
sampling_rate=sampling_rate)
_matched_streams = dict()
for key, value in matched_streams.items():
_st = value[phase_hint].select(sampling_rate=sampling_rate)
if len(_st) > 0:
_matched_streams.update({key: _st})
if len(_matched_streams) == 0:
Logger.info("No matching data for {0}, {1} phase".format(
master.resource_id.id, phase_hint))
continue
# Check lengths
master_length = [tr.stats.npts for tr in _master_stream]
if len(set(master_length)) > 1:
Logger.warning("Multiple lengths found - check that you "
"are providing sufficient data")
master_length = Counter(master_length).most_common(1)[0][0]
_master_stream = _master_stream.select(npts=master_length)
matched_length = [tr.stats.npts for st in _matched_streams.values() for tr in st]
if len(set(matched_length)) > 1:
Logger.warning(
"Multiple lengths found in matched data - check that you "
"are providing sufficient data")
matched_length = Counter(matched_length).most_common(1)[0][0]
# Remove empty streams and generate an ordered list of event_ids
used_event_ids, used_matched_streams = [], []
for event_id, _matched_stream in _matched_streams.items():
_matched_stream = _matched_stream.select(npts=matched_length)
if len(_matched_stream) > 0:
used_event_ids.append(event_id)
used_matched_streams.append(_matched_stream)
# Check that there are matching seed ids.
master_seed_ids = set(tr.id for tr in _master_stream)
matched_seed_ids = set(tr.id for st in used_matched_streams for tr in st)
if master_seed_ids not in matched_seed_ids:
Logger.warning(
"After checking length there are no matched traces: "
f"master: {master_seed_ids}, matched: {matched_seed_ids}")
continue
# Do the correlations
ccc_out, used_chans = _concatenate_and_correlate(
template=_master_stream, streams=used_matched_streams,
cores=max_workers)
# Convert ccc_out to pick-time
for i, used_event_id in enumerate(used_event_ids):
for j, chan in enumerate(used_chans[i]):
if not chan.used:
continue
correlation = ccc_out[i][j]
if interpolate:
shift, cc_max = _xcorr_interp(correlation, dt=delta)
else:
cc_max = np.amax(correlation)
shift = np.argmax(correlation) * delta
if cc_max < min_cc:
continue
shift -= shift_len
pick = [p for p in event_dict[used_event_id].picks
if p.phase_hint[0] == phase_hint
and p.waveform_id.station_code == chan.channel[0]
and p.waveform_id.channel_code == chan.channel[1]]
pick = sorted(pick, key=lambda p: p.time)[0]
tt2 = pick.time - (
event_dict[used_event_id].preferred_origin() or
event_dict[used_event_id].origins[0]).time
tt2 += shift
diff_time = differential_times_dict.get(
used_event_id, None)
if diff_time is None:
diff_time = _EventPair(
event_id_1=event_id_mapper[master.resource_id.id],
event_id_2=event_id_mapper[used_event_id])
diff_time.obs.append(
_DTObs(station=chan.channel[0],
tt1=master_tts["{0}_{1}".format(
chan.channel[0], phase_hint)],
tt2=tt2, weight=cc_max ** 2, phase=phase_hint[0]))
differential_times_dict.update({used_event_id: diff_time})
# Threshold on min_link
differential_times = [dt for dt in differential_times_dict.values()
if len(dt.obs) >= min_link]
return differential_times
| def _compute_dt_correlations(catalog, master, min_link, event_id_mapper,
stream_dict, min_cc, extract_len, pre_pick,
shift_len, interpolate, max_workers=1):
""" Compute cross-correlation delay times. """
max_workers = max_workers or 1
Logger.info(
f"Correlating {master.resource_id.id} with {len(catalog)} events")
differential_times_dict = dict()
master_stream = _prepare_stream(
stream=stream_dict[master.resource_id.id], event=master,
extract_len=extract_len, pre_pick=pre_pick)
available_seed_ids = {tr.id for st in master_stream.values() for tr in st}
Logger.info(f"The channels provided are: {available_seed_ids}")
master_seed_ids = {
SeedPickID(pick.waveform_id.get_seed_string(), pick.phase_hint[0])
for pick in master.picks if
pick.phase_hint[0] in "PS" and
pick.waveform_id.get_seed_string() in available_seed_ids}
Logger.info(f"Using channels: {master_seed_ids}")
# Dictionary of travel-times for master keyed by {station}_{phase_hint}
master_tts = dict()
master_origin_time = (master.preferred_origin() or master.origins[0]).time
for pick in master.picks:
if pick.phase_hint[0] not in "PS":
continue
tt1 = pick.time - master_origin_time
master_tts.update({
"{0}_{1}".format(
pick.waveform_id.station_code, pick.phase_hint[0]): tt1})
matched_length = extract_len + (2 * shift_len)
matched_pre_pick = pre_pick + shift_len
# We will use this to maintain order
event_dict = {event.resource_id.id: event for event in catalog}
event_ids = list(event_dict.keys())
matched_streams = {
event_id: _prepare_stream(
stream=stream_dict[event_id], event=event_dict[event_id],
extract_len=matched_length, pre_pick=matched_pre_pick,
seed_pick_ids=master_seed_ids)
for event_id in event_ids}
sampling_rates = {tr.stats.sampling_rate for st in master_stream.values()
for tr in st}
for phase_hint in master_stream.keys(): # Loop over P and S separately
for sampling_rate in sampling_rates: # Loop over separate samp rates
delta = 1.0 / sampling_rate
_master_stream = master_stream[phase_hint].select(
sampling_rate=sampling_rate)
_matched_streams = dict()
for key, value in matched_streams.items():
_st = value[phase_hint].select(sampling_rate=sampling_rate)
if len(_st) > 0:
_matched_streams.update({key: _st})
if len(_matched_streams) == 0:
Logger.info("No matching data for {0}, {1} phase".format(
master.resource_id.id, phase_hint))
continue
# Check lengths
master_length = [tr.stats.npts for tr in _master_stream]
if len(set(master_length)) > 1:
Logger.warning("Multiple lengths found - check that you "
"are providing sufficient data")
master_length = Counter(master_length).most_common(1)[0][0]
_master_stream = _master_stream.select(npts=master_length)
matched_length = [
tr.stats.npts for st in _matched_streams.values()
for tr in st]
if len(set(matched_length)) > 1:
Logger.warning(
"Multiple lengths found in matched data - check that you "
"are providing sufficient data")
matched_length = Counter(matched_length).most_common(1)[0][0]
# Remove empty streams and generate an ordered list of event_ids
used_event_ids, used_matched_streams = [], []
for event_id, _matched_stream in _matched_streams.items():
_matched_stream = _matched_stream.select(npts=matched_length)
if len(_matched_stream) > 0:
used_event_ids.append(event_id)
used_matched_streams.append(_matched_stream)
# Check that there are matching seed ids.
master_seed_ids = set(tr.id for tr in _master_stream)
matched_seed_ids = set(tr.id for st in used_matched_streams for tr in st)
if master_seed_ids not in matched_seed_ids:
Logger.warning(
"After checking length there are no matched traces: "
f"master: {master_seed_ids}, matched: {matched_seed_ids}")
continue
# Do the correlations
ccc_out, used_chans = _concatenate_and_correlate(
template=_master_stream, streams=used_matched_streams,
cores=max_workers)
# Convert ccc_out to pick-time
for i, used_event_id in enumerate(used_event_ids):
for j, chan in enumerate(used_chans[i]):
if not chan.used:
continue
correlation = ccc_out[i][j]
if interpolate:
shift, cc_max = _xcorr_interp(correlation, dt=delta)
else:
cc_max = np.amax(correlation)
shift = np.argmax(correlation) * delta
if cc_max < min_cc:
continue
shift -= shift_len
pick = [p for p in event_dict[used_event_id].picks
if p.phase_hint[0] == phase_hint
and p.waveform_id.station_code == chan.channel[0]
and p.waveform_id.channel_code == chan.channel[1]]
pick = sorted(pick, key=lambda p: p.time)[0]
tt2 = pick.time - (
event_dict[used_event_id].preferred_origin() or
event_dict[used_event_id].origins[0]).time
tt2 += shift
diff_time = differential_times_dict.get(
used_event_id, None)
if diff_time is None:
diff_time = _EventPair(
event_id_1=event_id_mapper[master.resource_id.id],
event_id_2=event_id_mapper[used_event_id])
diff_time.obs.append(
_DTObs(station=chan.channel[0],
tt1=master_tts["{0}_{1}".format(
chan.channel[0], phase_hint)],
tt2=tt2, weight=cc_max ** 2, phase=phase_hint[0]))
differential_times_dict.update({used_event_id: diff_time})
# Threshold on min_link
differential_times = [dt for dt in differential_times_dict.values()
if len(dt.obs) >= min_link]
return differential_times
|
10,891 | def install_pip():
print('')
print('Install pip')
print('')
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tmp = tempfile.mkdtemp(prefix='buildout-dev-')
try:
get_pip = os.path.join(tmp, 'get-pip.py')
with open(get_pip, 'wb') as f:
f.write(urlopen('https://bootstrap.pypa.io/get-pip.py').read())
if subprocess.call([sys.executable, get_pip]):
raise RuntimeError("pip failed.")
finally:
shutil.rmtree(tmp)
return_code = subprocess.call(
[sys.executable] + sys.argv + ['--no-clean']
)
sys.exit(return_code)
| def install_pip():
print('')
print('Install pip')
print('')
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
tmp = tempfile.mkdtemp(prefix='buildout-dev-')
try:
get_pip = os.path.join(tmp, 'get-pip.py')
with open(get_pip, 'wb') as f:
f.write(urlopen('https://bootstrap.pypa.io/get-pip.py').read())
if subprocess.call([sys.executable, get_pip]):
raise RuntimeError("Failed to install pip")
finally:
shutil.rmtree(tmp)
return_code = subprocess.call(
[sys.executable] + sys.argv + ['--no-clean']
)
sys.exit(return_code)
|
7,099 | def filter_ids(
pools: 'List[Pool]',
ids: 'Iterable[str]',
*,
warn: 'bool' = True,
out: 'IDTokens' = IDTokens.Task,
pattern_match: 'bool' = True,
# ) -> _RET:
):
"""Filter IDs against a pool of tasks.
Args:
pool:
The pool to match against.
ids:
List of IDs to match against the pool.
out:
The type of object to match:
* If IDTokens.Task all matching TaskProxies will be returned.
* If IDTokens.Cycle all CyclePoints with any matching tasks will
be returned.
warn:
Whether to log a warning if no matching tasks are found.
TODO:
Consider using wcmatch which would add support for
extglobs, namely brace syntax e.g. {foo,bar}.
"""
if out not in {IDTokens.Cycle, IDTokens.Task}:
raise ValueError(f'Invalid output format: {out}')
_cycles: 'List[PointBase]' = []
_tasks: 'List[TaskProxy]' = []
_not_matched: 'List[str]' = []
# enable / disable pattern matching
match: Callable[[Any, Any], bool]
if pattern_match:
match = fnmatchcase
else:
match = str.__eq__
pattern_ids = [
id_
for id_ in ids
if contains_fnmatch(id_)
]
if pattern_ids:
LOG.warning(f'IDs cannot contain globs: {", ".join(pattern_ids)}')
ids = [
id_
for id_ in ids
if id_ not in pattern_ids
]
_not_matched.extend(pattern_ids)
id_tokens_map: Dict[str, Tokens] = {}
for id_ in ids:
try:
id_tokens_map[id_] = Tokens(id_, relative=True)
except ValueError:
_not_matched.append(id_)
LOG.warning(f'Invalid ID: {id_}')
for id_, tokens in id_tokens_map.items():
for lowest_token in reversed(IDTokens):
if tokens.get(lowest_token.value):
break
# This needs to be a set to avoid getting two copies of matched tasks
# in cycle points that appear in both pools:
cycles = set()
tasks = []
# filter by cycle
if lowest_token == IDTokens.Cycle:
cycle = tokens[IDTokens.Cycle.value]
cycle_sel = tokens.get(IDTokens.Cycle.value + '_sel') or '*'
for pool in pools:
for icycle, itasks in pool.items():
if not itasks:
continue
if not point_match(icycle, cycle, pattern_match):
continue
if cycle_sel == '*':
cycles.add(icycle)
continue
for itask in itasks.values():
if match(itask.state.status, cycle_sel):
cycles.add(icycle)
break
# filter by task
elif lowest_token == IDTokens.Task: # noqa SIM106
cycle = tokens[IDTokens.Cycle.value]
cycle_sel_raw = tokens.get(IDTokens.Cycle.value + '_sel')
cycle_sel = cycle_sel_raw or '*'
task = tokens[IDTokens.Task.value]
task_sel_raw = tokens.get(IDTokens.Task.value + '_sel')
task_sel = task_sel_raw or '*'
for pool in pools:
for icycle, itasks in pool.items():
if not point_match(icycle, cycle, pattern_match):
continue
for itask in itasks.values():
if (
# check cycle selector
(
(
# disable cycle_sel if not defined if
# pattern matching is turned off
pattern_match is False
and cycle_sel_raw is None
)
or match(itask.state.status, cycle_sel)
)
# check namespace name
and itask.name_match(task, match_func=match)
# check task selector
and (
(
# disable task_sel if not defined if
# pattern matching is turned off
pattern_match is False
and task_sel_raw is None
)
or match(itask.state.status, task_sel)
)
):
tasks.append(itask)
else:
raise NotImplementedError
if not (cycles or tasks):
_not_matched.append(id_)
if warn:
LOG.warning(f"No active tasks matching: {id_}")
else:
_cycles.extend(list(cycles))
_tasks.extend(tasks)
ret: List[Any] = []
if out == IDTokens.Cycle:
_cycles.extend({
itask.point
for itask in _tasks
})
ret = _cycles
elif out == IDTokens.Task:
for pool in pools:
for icycle in _cycles:
if icycle in pool:
_tasks.extend(pool[icycle].values())
ret = _tasks
return ret, _not_matched
| def filter_ids(
pools: 'List[Pool]',
ids: 'Iterable[str]',
*,
warn: 'bool' = True,
out: 'IDTokens' = IDTokens.Task,
pattern_match: 'bool' = True,
# ) -> _RET:
):
"""Filter IDs against a pool of tasks.
Args:
pool:
The pool to match against.
ids:
List of IDs to match against the pool.
out:
The type of object to match:
* If IDTokens.Task all matching TaskProxies will be returned.
* If IDTokens.Cycle all CyclePoints with any matching tasks will
be returned.
warn:
Whether to log a warning if no matching tasks are found.
TODO:
Consider using wcmatch which would add support for
extglobs, namely brace syntax e.g. {foo,bar}.
"""
if out not in {IDTokens.Cycle, IDTokens.Task}:
raise ValueError(f'Invalid output format: {out}')
_cycles: 'List[PointBase]' = []
_tasks: 'List[TaskProxy]' = []
_not_matched: 'List[str]' = []
# enable / disable pattern matching
match: Callable[[Any, Any], bool]
if pattern_match:
match = fnmatchcase
else:
match = str.__eq__
pattern_ids = [
id_
for id_ in ids
if contains_fnmatch(id_)
]
if pattern_ids:
LOG.warning(f'IDs cannot contain globs: {", ".join(pattern_ids)}')
ids = [
id_
for id_ in ids
if id_ not in pattern_ids
]
_not_matched.extend(pattern_ids)
id_tokens_map: Dict[str, Tokens] = {}
for id_ in ids:
try:
id_tokens_map[id_] = Tokens(id_, relative=True)
except ValueError:
_not_matched.append(id_)
LOG.warning(f'Invalid ID: {id_}')
for id_, tokens in id_tokens_map.items():
for lowest_token in reversed(IDTokens):
if tokens.get(lowest_token.value):
break
# This needs to be a set to avoid getting two copies of matched tasks
# in cycle points that appear in both pools:
cycles = set()
tasks = []
# filter by cycle
if lowest_token == IDTokens.Cycle:
cycle = tokens[IDTokens.Cycle.value]
cycle_sel = tokens.get(IDTokens.Cycle.value + '_sel') or '*'
for pool in pools:
for icycle, itasks in pool.items():
if not itasks:
continue
if not point_match(icycle, cycle, pattern_match):
continue
if cycle_sel == '*':
cycles.add(icycle)
continue
for itask in itasks.values():
if match(itask.state.status, cycle_sel):
cycles.add(icycle)
break
# filter by task
elif lowest_token == IDTokens.Task: # noqa SIM106
cycle = tokens[IDTokens.Cycle.value]
cycle_sel_raw = tokens.get(IDTokens.Cycle.value + '_sel')
cycle_sel = cycle_sel_raw or '*'
task = tokens[IDTokens.Task.value]
task_sel_raw = tokens.get(IDTokens.Task.value + '_sel')
task_sel = task_sel_raw or '*'
for pool in pools:
for icycle, itasks in pool.items():
if not point_match(icycle, cycle, pattern_match):
continue
for itask in itasks.values():
if (
# check cycle selector
(
(
# disable cycle_sel if not defined if
# pattern matching is turned off
pattern_match is False
and cycle_sel_raw is None
)
or match(itask.state.status, cycle_sel)
)
# check namespace name
and itask.name_match(task, match_func=match)
# check task selector
and (
(
# disable task_sel if not defined if
# pattern matching is turned off
pattern_match is False
and task_sel_raw is None
)
or match(itask.state.status, task_sel)
)
):
tasks.append(itask)
else:
raise NotImplementedError
if not (cycles or tasks):
_not_matched.append(id_)
if warn:
LOG.warning(f"No active tasks matching: {id_}")
else:
_cycles.extend(cycles)
_tasks.extend(tasks)
ret: List[Any] = []
if out == IDTokens.Cycle:
_cycles.extend({
itask.point
for itask in _tasks
})
ret = _cycles
elif out == IDTokens.Task:
for pool in pools:
for icycle in _cycles:
if icycle in pool:
_tasks.extend(pool[icycle].values())
ret = _tasks
return ret, _not_matched
|
4,739 | def _parse_char_metrics(fh):
"""
Parse the given filehandle for character metrics information and return
the information as dicts.
It is assumed that the file cursor is on the line behind
'StartCharMetrics'.
Returns
-------
ascii_d : dict
A mapping "ASCII num of the character" to `.CharMetrics`.
name_d : dict
A mapping "character name" to `.CharMetrics`.
Notes
-----
This function is incomplete per the standard, but thus far parses
all the sample afm files tried.
"""
required_keys = {'C', 'WX', 'N', 'B'}
ascii_d = {}
name_d = {}
for line in fh:
# We are defensively letting values be utf8. The spec requires
# ascii, but there are non-compliant fonts in circulation
line = _to_str(line.rstrip()) # Convert from byte-literal
if line.startswith('EndCharMetrics'):
return ascii_d, name_d
# Split the metric line into a dictionary, keyed by metric identifiers
vals = dict(s.strip().split(' ', 1) for s in line.split(';') if s)
# There may be other metrics present, but only these are needed
if not required_keys.issubset(vals):
raise RuntimeError('Bad char metrics line: %s' % line)
num = _to_int(vals['C'])
wx = _to_float(vals['WX'])
name = vals['N']
bbox = _to_list_of_floats(vals['B'])
bbox = list(map(int, bbox))
metrics = CharMetrics(wx, name, bbox)
# Workaround: If the character name is 'Euro', give it the
# corresponding character code, according to WinAnsiEncoding (see PDF
# Reference).
if name == 'Euro':
num = 128
if name == 'minus':
num = ord("\N{MINUS SIGN}") # 0x2212
if num != -1:
ascii_d[num] = metrics
name_d[name] = metrics
raise RuntimeError('Bad parse')
| def _parse_char_metrics(fh):
"""
Parse the given filehandle for character metrics information and return
the information as dicts.
It is assumed that the file cursor is on the line behind
'StartCharMetrics'.
Returns
-------
ascii_d : dict
A mapping "ASCII num of the character" to `.CharMetrics`.
name_d : dict
A mapping "character name" to `.CharMetrics`.
Notes
-----
This function is incomplete per the standard, but thus far parses
all the sample afm files tried.
"""
required_keys = {'C', 'WX', 'N', 'B'}
ascii_d = {}
name_d = {}
for line in fh:
# We are defensively letting values be utf8. The spec requires
# ascii, but there are non-compliant fonts in circulation
line = _to_str(line.rstrip()) # Convert from byte-literal
if line.startswith('EndCharMetrics'):
return ascii_d, name_d
# Split the metric line into a dictionary, keyed by metric identifiers
vals = dict(s.strip().split(' ', 1) for s in line.split(';') if s)
# There may be other metrics present, but only these are needed
if not required_keys.issubset(vals):
raise RuntimeError('Bad char metrics line: %s' % line)
num = _to_int(vals['C'])
wx = _to_float(vals['WX'])
name = vals['N']
bbox = _to_list_of_floats(vals['B'])
bbox = list(map(int, bbox))
metrics = CharMetrics(wx, name, bbox)
# Workaround: If the character name is 'Euro', give it the
# corresponding character code, according to WinAnsiEncoding (see PDF
# Reference).
if name == 'Euro':
num = 128
elif name == 'minus':
num = ord("\N{MINUS SIGN}") # 0x2212
if num != -1:
ascii_d[num] = metrics
name_d[name] = metrics
raise RuntimeError('Bad parse')
|
37,295 | def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given meas_level, rep_time, rep_delay is not allowed
for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', None):
raise SchemaValidationError(
('rep_time = {} not supported for backend {}, only {} is supported'
).format(rep_time, backend_config.backend_name, backend_config.rep_times)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to ΞΌs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to ΞΌs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
| def _parse_pulse_args(backend, qubit_lo_freq, meas_lo_freq, qubit_lo_range,
meas_lo_range, schedule_los, meas_level,
meas_return, meas_map,
memory_slot_size,
rep_time, rep_delay,
parametric_pulses,
**run_config):
"""Build a pulse RunConfig replacing unset arguments with defaults derived from the `backend`.
See `assemble` for more information on the required arguments.
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
Raises:
SchemaValidationError: if the given meas_level, rep_time, rep_delay is not allowed
for the given `backend`.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_default = backend.defaults()
backend_config = backend.configuration()
if meas_level not in getattr(backend_config, 'meas_levels', [MeasLevel.CLASSIFIED]):
raise SchemaValidationError(
('meas_level = {} not supported for backend {}, only {} is supported'
).format(meas_level, backend_config.backend_name, backend_config.meas_levels)
)
if rep_time not in getattr(backend_config, 'rep_times', []):
raise SchemaValidationError(
('rep_time = {} not supported for backend {}, only {} is supported'
).format(rep_time, backend_config.backend_name, backend_config.rep_times)
)
if rep_delay not in getattr(backend_config, 'rep_delays', None):
raise SchemaValidationError(
('rep_delay = {} not supported for backend {}, only {} is supported'
).format(rep_delay, backend_config.backend_name, backend_config.rep_delays)
)
meas_map = meas_map or getattr(backend_config, 'meas_map', None)
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if LO configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
if not qubit_lo_freq and hasattr(backend_default, 'qubit_freq_est'):
qubit_lo_freq = backend_default.qubit_freq_est
if not meas_lo_freq and hasattr(backend_default, 'meas_freq_est'):
meas_lo_freq = backend_default.meas_freq_est
qubit_lo_range = qubit_lo_range or getattr(backend_config, 'qubit_lo_range', None)
meas_lo_range = meas_lo_range or getattr(backend_config, 'meas_lo_range', None)
dynamic_reprate_enabled = getattr(backend_config, 'dynamic_reprate_enabled', False)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if rep_time:
if dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates are supported on this backend. 'rep_delay' will be "
"used instead, if specified.", RuntimeWarning)
if isinstance(rep_time, list):
rep_time = rep_time[0]
rep_time = int(rep_time * 1e6) # convert sec to ΞΌs
rep_delay = rep_delay or getattr(backend_config, 'rep_delays', None)
if rep_delay:
if not dynamic_reprate_enabled:
warnings.warn("Dynamic rep rates not supported on this backend. 'rep_time' will be "
"used instead.", RuntimeWarning)
if isinstance(rep_delay, list):
rep_delay = rep_delay[0]
rep_delay = rep_delay * 1e6 # convert sec to ΞΌs
parametric_pulses = parametric_pulses or getattr(backend_config, 'parametric_pulses', [])
# create run configuration and populate
run_config_dict = dict(qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
meas_map=meas_map,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
rep_delay=rep_delay,
parametric_pulses=parametric_pulses,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return run_config
|
11,878 | def _toqclass_helper(im):
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
im = str(im.toUtf8(), "utf-8")
if isPath(im):
im = Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i : i + 3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
data = im.tobytes("raw", "BGRA")
format = QImage.Format_ARGB32
else:
raise ValueError(f"unsupported image mode {repr(im.mode)}")
__data = data or align8to32(im.tobytes(), im.size[0], im.mode)
return {"data": __data, "im": im, "format": format, "colortable": colortable}
| def _toqclass_helper(im):
data = None
colortable = None
# handle filename, if given instead of image name
if hasattr(im, "toUtf8"):
# FIXME - is this really the best way to do this?
im = str(im.toUtf8(), "utf-8")
if isPath(im):
im = Image.open(im)
if im.mode == "1":
format = QImage.Format_Mono
elif im.mode == "L":
format = QImage.Format_Indexed8
colortable = []
for i in range(256):
colortable.append(rgb(i, i, i))
elif im.mode == "P":
format = QImage.Format_Indexed8
colortable = []
palette = im.getpalette()
for i in range(0, len(palette), 3):
colortable.append(rgb(*palette[i : i + 3]))
elif im.mode == "RGB":
data = im.tobytes("raw", "BGRX")
format = QImage.Format_RGB32
elif im.mode == "RGBA":
data = im.tobytes("raw", "BGRA")
format = QImage.Format_ARGB32
else:
raise ValueError(f"unsupported image mode {im.mode!r}")
__data = data or align8to32(im.tobytes(), im.size[0], im.mode)
return {"data": __data, "im": im, "format": format, "colortable": colortable}
|
33,112 | def get_time_as_str(t, timezone=None):
if timezone is None:
timezone = config.get("TIMEZONE")
s = (t - datetime.utcnow()).total_seconds()
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
d = timedelta(hours=h, minutes=m, seconds=s)
if timezone is not None:
disappear_time = datetime.now(tz=timezone) + d
else:
disappear_time = datetime.now() + d
# Time remaining in minutes and seconds
time_left = "%dm %ds" % (m, s) if h == 0 else "%dh %dm" % (h, m)
# Disappear time in 12h format, eg "2:30:16 PM"
time_12 = disappear_time.strftime("%I:%M:%S") \
+ disappear_time.strftime("%p").lower()
# Disappear time in 24h format including seconds, eg "14:30:16"
time_24 = disappear_time.strftime("%H:%M:%S")
# Get the same as above but without seconds
time_left_minutes = "%dm" % m if h == 0 else "%dh %dm" % (h, m)
time_12_minutes = disappear_time.strftime("%I:%M") \
+ disappear_time.strftime("%p").lower()
time_24_minutes = disappear_time.strftime("%H:%M")
return time_left, time_12, time_24, \
time_left_minutes, time_12_minutes, time_24_minutes
| def get_time_as_str(t, timezone=None):
if timezone is None:
timezone = config.get("TIMEZONE")
s = (t - datetime.utcnow()).total_seconds()
(m, s) = divmod(s, 60)
(h, m) = divmod(m, 60)
d = timedelta(hours=h, minutes=m, seconds=s)
if timezone is not None:
disappear_time = datetime.now(tz=timezone) + d
else:
disappear_time = datetime.now() + d
# Time remaining in minutes and seconds
time_left = "%dm %ds" % (m, s) if h == 0 else "%dh %dm" % (h, m)
# Disappear time in 12h format, eg "2:30:16 PM"
time_12 = disappear_time.strftime("%I:%M:%S") \
+ disappear_time.strftime("%p").lower()
# Disappear time in 24h format including seconds, eg "14:30:16"
time_24 = disappear_time.strftime("%H:%M:%S")
# Get the same as above but without seconds
time_left_minutes = "%dm" % m if h == 0 else "%dh %dm" % (h, m)
time_12_minutes = disappear_time.strftime("%I:%M") \
+ disappear_time.strftime("%p").lower()
time_24_minutes = disappear_time.strftime("%H:%M")
return time_left, time_12, time_24, \
time_no_sec, time_12h_no_sec, time_24h_no_sec
|
20,542 | def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Input mask dimension: {dim}. Input dimension for the mask should be 3.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
| def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_loglevel(verbose=verbose)
# Default params
param = Param()
# Get parser info
fname_data = arguments.i
fname_mask = arguments.m
fname_mask_noise = arguments.m_noise
method = arguments.method
file_name = arguments.o
# Check parameters
if method in ['diff', 'single']:
if not fname_mask:
raise SCTArgumentParser.error(parser, f"You need to provide a mask with -method {method}.")
# Load data
im_data = Image(fname_data)
data = im_data.data
dim = len(data.shape)
if fname_mask:
mask = Image(fname_mask).data
# Check dimensionality
if method in ['diff', 'mult']:
if dim != 4:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 4.")
if method in ['single']:
if dim not in [3, 4]:
raise ValueError(f"Input data dimension: {dim}. Input dimension for this method should be 3 or 4.")
# Check dimensionality of mask
if fname_mask:
if len(mask.shape) != 3:
raise ValueError(f"Mask should be a 3D image, but the input mask has shape '{mask.shape}'.")
# Retrieve selected volumes
index_vol = parse_num_list(arguments.vol)
if not index_vol:
if method in ['diff', 'mult']:
index_vol = range(data.shape[3])
elif method in ['single']:
index_vol = [0]
# Compute SNR
# NB: "time" is assumed to be the 4th dimension of the variable "data"
if method == 'mult':
# Compute mean and STD across time
data_mean = np.mean(data[:, :, :, index_vol], axis=3)
data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)
# Generate mask where std is different from 0
mask_std_nonzero = np.where(data_std > param.almost_zero)
snr_map = np.zeros_like(data_mean)
snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]
# Output SNR map
fname_snr = add_suffix(fname_data, '_SNR-' + method)
im_snr = empty_like(im_data)
im_snr.data = snr_map
im_snr.save(fname_snr, dtype=np.float32)
# Output non-zero mask
fname_stdnonzero = add_suffix(fname_data, '_mask-STD-nonzero' + method)
im_stdnonzero = empty_like(im_data)
data_stdnonzero = np.zeros_like(data_mean)
data_stdnonzero[mask_std_nonzero] = 1
im_stdnonzero.data = data_stdnonzero
im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)
# Compute SNR in ROI
if fname_mask:
snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])
elif method == 'diff':
# Check user selected exactly 2 volumes for this method.
if not len(index_vol) == 2:
raise ValueError(f"Number of selected volumes: {len(index_vol)}. The method 'diff' should be used with "
f"exactly 2 volumes. You can specify the number of volumes with the flag '-vol'.")
data_2vol = np.take(data, index_vol, axis=3)
# Compute mean in ROI
data_mean = np.mean(data_2vol, axis=3)
mean_in_roi = np.average(data_mean, weights=mask)
data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])
_, std_in_roi = weighted_avg_and_std(data_sub, mask)
# Compute SNR, correcting for Rayleigh noise (see eq. 7 in Dietrich et al.)
snr_roi = (2 / np.sqrt(2)) * mean_in_roi / std_in_roi
elif method == 'single':
# Check that the input volume is 3D, or if it is 4D, that the user selected exactly 1 volume for this method.
if dim == 3:
data3d = data
elif dim == 4:
if not len(index_vol) == 1:
raise ValueError(f"Selected volumes: {index_vol}. The method 'single' should be used with "
f"exactly 1 volume. You can specify the number of volumes with the flag '-vol'.")
data3d = np.squeeze(data[..., index_vol])
# Check that input noise mask is provided
if fname_mask_noise:
mask_noise = Image(fname_mask_noise).data
else:
raise RuntimeError("A noise mask is mandatory with '-method single'.")
# Compute mean in ROI
mean_in_roi = np.average(data3d, weights=mask)
# Compute standard deviation in background
std_in_roi = np.std(data3d[mask_noise])
# Compute SNR, correcting for Rayleigh noise (see eq. A12 in Dietrich et al.)
snr_roi = np.sqrt((4 - np.pi) / 2) * mean_in_roi / std_in_roi
# Display result
if fname_mask:
printv('\nSNR_' + method + ' = ' + str(snr_roi) + '\n', type='info')
# Added function for text file
if file_name is not None:
with open(file_name, "w") as f:
f.write(str(snr_roi))
printv('\nFile saved to ' + file_name)
|
24,112 | def config_proxy_skip(proxies, uri, skip_proxy=False):
"""
Returns an amended copy of the proxies dictionary - used by `requests`,
it will disable the proxy if the uri provided is to be reached directly.
:param proxies dict with existing proxies: 'https', 'http', 'no' as pontential keys
:param uri uri to determine if proxy is necessary or not.
:param skip_proxy if True, the proxy dictionary returned will disable all proxies
"""
parsed_uri = urlparse(uri)
# disable proxy if necessary
if skip_proxy:
proxies['http'] = ''
proxies['https'] = ''
elif proxies.get('no'):
urls = []
if isinstance(proxies['no'], string_types):
urls = proxies['no'].replace(';', ',').split(",")
elif isinstance(proxies['no'], list):
urls = proxies['no']
for url in urls:
if url in parsed_uri.netloc:
if 'http' in proxies:
proxies.pop('http')
if 'https' in proxies:
proxies.pop('https')
return proxies
| def config_proxy_skip(proxies, uri, skip_proxy=False):
"""
Returns an amended copy of the proxies dictionary - used by `requests`,
it will disable the proxy if the uri provided is to be reached directly.
:param proxies dict with existing proxies: 'https', 'http', 'no' as pontential keys
:param uri URI to determine if a proxy is necessary or not.
:param skip_proxy if True, the proxy dictionary returned will disable all proxies
"""
parsed_uri = urlparse(uri)
# disable proxy if necessary
if skip_proxy:
proxies['http'] = ''
proxies['https'] = ''
elif proxies.get('no'):
urls = []
if isinstance(proxies['no'], string_types):
urls = proxies['no'].replace(';', ',').split(",")
elif isinstance(proxies['no'], list):
urls = proxies['no']
for url in urls:
if url in parsed_uri.netloc:
if 'http' in proxies:
proxies.pop('http')
if 'https' in proxies:
proxies.pop('https')
return proxies
|
7,893 | def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
n_discarded = 0 # for accelation, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
| def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
n_discarded = 0 # for accelation, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
8,654 | def get_pid_filename(options, pid_dir):
"""Get the pid filename in ``pid_dir`` from the given ``options``.
:param options: command line options
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if a configuration filename is given
in the ``options``, its basename is used to generate the filename, as:
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if options.config:
basename = os.path.basename(options.config)
if basename.endswith('.cfg'):
basename = basename[:-4]
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
| def get_pid_filename(options, pid_dir):
"""Get the pid file name in ``pid_dir`` from the given ``options``.
:param options: command line options
:param str pid_dir: path to the pid directory
:return: absolute filename of the pid file
By default, it's ``sopel.pid``, but if a configuration filename is given
in the ``options``, its basename is used to generate the filename, as:
``sopel-{basename}.pid`` instead.
"""
name = 'sopel.pid'
if options.config:
basename = os.path.basename(options.config)
if basename.endswith('.cfg'):
basename = basename[:-4]
name = 'sopel-%s.pid' % basename
return os.path.abspath(os.path.join(pid_dir, name))
|
5,661 | def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution. The alternative hypothesis
can be either 'two-sided' (default), 'less' or 'greater'.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
Two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample KS test, the distribution is
assumed to be continuous.
In the one-sided test, the alternative is that the empirical
cumulative distribution function F(x) of the data1 variable is "less"
or "greater" than the empirical cumulative distribution function G(x)
of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
If the KS statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333334, 5.129279597781977e-05)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14691437867433876)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
minS = np.clip(-np.min(cddiffs), 0, 1) # Ensure sign of minS is not negative.
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptoptic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted(np.array([n1, n2], float), reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
| def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution. The alternative hypothesis
can be either 'two-sided' (default), 'less' or 'greater'.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
Two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample KS test, the distribution is
assumed to be continuous.
In the one-sided test, the alternative is that the empirical
cumulative distribution function F(x) of the data1 variable is "less"
or "greater" than the empirical cumulative distribution function G(x)
of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
If the KS statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv fiur Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333334, 5.129279597781977e-05)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14691437867433876)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
minS = np.clip(-np.min(cddiffs), 0, 1) # Ensure sign of minS is not negative.
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int).max / n2g:
mode = 'asymp'
warnings.warn(
f"Exact ks_2samp calculation not possible with samples sizes "
f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptoptic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
|
19,614 | def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
ubinpath = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[ubinpath + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + getclangversion(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
| def getgccversion(chost=None):
"""
rtype: C{str}
return: the current in-use gcc version
"""
gcc_ver_command = ["gcc", "-dumpversion"]
gcc_ver_prefix = "gcc-"
# accept clang as system compiler too
clang_ver_command = ["clang", "--version"]
clang_ver_prefix = "clang-"
ubinpath = os.path.join("/", portage.const.EPREFIX, "usr", "bin")
def getclangversion(output):
version = re.search("clang version ([0-9.]+) ", output)
if version:
return version.group(1)
return "unknown"
gcc_not_found_error = red(
"!!! No gcc found. You probably need to 'source /etc/profile'\n"
+ "!!! to update the environment of this terminal and possibly\n"
+ "!!! other terminals also.\n"
)
if chost:
try:
proc = subprocess.Popen(
[usr_bin_path + "/" + "gcc-config", "-c"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + gcc_ver_command[0]]
+ gcc_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
# no GCC? try Clang
try:
proc = subprocess.Popen(
[ubinpath + "/" + chost + "-" + clang_ver_command[0]]
+ clang_ver_command[1:],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return clang_ver_prefix + getclangversion(myoutput)
try:
proc = subprocess.Popen(
gcc_ver_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
except OSError:
myoutput = None
mystatus = 1
else:
myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
|
51,444 | def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for 2D and FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
| def _plot1d(plotfunc):
"""
Decorator for common 2d plotting logic
Also adds the 2d plot method to class _PlotMethods
"""
commondoc = """
Parameters
----------
darray : DataArray
Must be 2 dimensional, unless creating faceted plots
x : string, optional
Coordinate for x axis. If None use darray.dims[1]
y : string, optional
Coordinate for y axis. If None use darray.dims[0]
hue : string, optional
Dimension or coordinate for which you want multiple lines plotted.
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
Mutually exclusive with ``size`` and ``ax``.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
ax : matplotlib.axes.Axes, optional
Axis on which to plot this figure. By default, use the current axis.
Mutually exclusive with ``size`` and ``figsize``.
row : string, optional
If passed, make row faceted plots on this dimension name
col : string, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
xscale, yscale : 'linear', 'symlog', 'log', 'logit', optional
Specifies scaling for the x- and y-axes respectively
xticks, yticks : Specify tick locations for x- and y-axes
xlim, ylim : Specify x- and y-axes limits
xincrease : None, True, or False, optional
Should the values on the x axes be increasing from left to right?
if None, use the default for the matplotlib function.
yincrease : None, True, or False, optional
Should the values on the y axes be increasing from top to bottom?
if None, use the default for the matplotlib function.
add_labels : bool, optional
Use xarray metadata to label axes
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only used
for FacetGrid plots.
**kwargs : optional
Additional arguments to wrapped matplotlib function
Returns
-------
artist :
The same type of primitive artist that the wrapped matplotlib
function returns
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
# plotfunc and newplotfunc have different signatures:
# - plotfunc: (x, y, z, ax, **kwargs)
# - newplotfunc: (darray, *args, x, y, **kwargs)
# where plotfunc accepts numpy arrays, while newplotfunc accepts a DataArray
# and variable names. newplotfunc also explicitly lists most kwargs, so we
# need to shorten it
def signature(darray, *args, x, y, **kwargs):
pass
@override_signature(signature)
@functools.wraps(plotfunc)
def newplotfunc(
darray,
*args,
x=None,
y=None,
hue=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
# All 2d plots in xarray share this function signature.
# Method signature below should be consistent.
# Handle facetgrids first
if row or col:
allargs = locals().copy()
allargs.update(allargs.pop("kwargs"))
allargs.pop("darray")
allargs.pop("plotfunc")
if plotfunc.__name__ == "line":
return _easy_facetgrid(darray, line, kind="line", **allargs)
else:
raise ValueError(f"Faceting not implemented for {plotfunc.__name__}")
# The allargs dict passed to _easy_facetgrid above contains args
if args == ():
args = kwargs.pop("args", ())
else:
assert "args" not in kwargs
ax = get_axis(figsize, size, aspect, ax)
xplt, yplt, hueplt, hue_label = _infer_line_data(darray, x, y, hue)
primitive = plotfunc(xplt, yplt, ax, *args, add_labels=add_labels, **kwargs)
if add_labels:
ax.set_title(darray._title_for_slice())
if hueplt is not None and add_legend:
if plotfunc.__name__ == "hist":
handles = primitive[-1]
else:
handles = primitive
ax.legend(
handles=handles,
labels=list(hueplt.values),
title=label_from_attrs(hueplt),
)
_update_axes(
ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim
)
return primitive
# For use as DataArray.plot.plotmethod
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
*args,
x=None,
y=None,
figsize=None,
size=None,
aspect=None,
ax=None,
row=None,
col=None,
col_wrap=None,
xincrease=True,
yincrease=True,
add_legend=True,
add_labels=True,
subplot_kws=None,
xscale=None,
yscale=None,
xticks=None,
yticks=None,
xlim=None,
ylim=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["darray"] = _PlotMethods_obj._da
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
|
17,352 | def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: xarray.ALL_DIMS, str or tuple of strings, optional
Which dimensions to sum over.
If not speciified, then all the common dimensions are summed over.
**kwargs: dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
dot: DataArray
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),
... dims=['a', 'b', 'c'])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])
>>> da_a
<xarray.DataArray (a: 3, b: 2)>
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)>
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dims=['a', 'b'])
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=['a'])
<xarray.DataArray (b: 2, c: 2)>
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])
<xarray.DataArray (a: 3, d: 3)>
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=xr.ALL_DIMS)
<xarray.DataArray ()>
array(235)
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
"Given {}.".format([type(arr) for arr in arrays])
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
if isinstance(dims, str):
dims = (dims,)
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is ALL_DIMS:
dims = all_dims
elif dims is None:
# find dimensions that occur more than one times
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join([dim_map[d] for d in ds]) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join([dim_map[d] for d in output_core_dims[0]])
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
dask="allowed",
)
return result.transpose(*[d for d in all_dims if d in result.dims])
| def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: '...', str or tuple of strings, optional
Which dimensions to sum over.
If not speciified, then all the common dimensions are summed over.
**kwargs: dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
dot: DataArray
Examples
--------
>>> import numpy as np
>>> import xarray as xr
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=['a', 'b'])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2),
... dims=['a', 'b', 'c'])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=['c', 'd'])
>>> da_a
<xarray.DataArray (a: 3, b: 2)>
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)>
array([[[ 0, 1],
[ 2, 3]],
[[ 4, 5],
[ 6, 7]],
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dims=['a', 'b'])
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=['a'])
<xarray.DataArray (b: 2, c: 2)>
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dims=['b', 'c'])
<xarray.DataArray (a: 3, d: 3)>
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=xr.ALL_DIMS)
<xarray.DataArray ()>
array(235)
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
"Given {}.".format([type(arr) for arr in arrays])
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
if isinstance(dims, str):
dims = (dims,)
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is ALL_DIMS:
dims = all_dims
elif dims is None:
# find dimensions that occur more than one times
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join([dim_map[d] for d in ds]) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join([dim_map[d] for d in output_core_dims[0]])
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
dask="allowed",
)
return result.transpose(*[d for d in all_dims if d in result.dims])
|
5,707 | def _van_es_entropy(X, m):
"""Compute the van Es estimator as described in [6]"""
# No equation number, but referred to as HVE_mn.
# Typo: there should be a log within the summation.
n = X.shape[-1]
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * (X[..., m:] - X[..., :-m])),
axis=-1)
k = np.arange(m, n+1)
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
| def _van_es_entropy(X, m):
"""Compute the van Es estimator as described in [6]."""
# No equation number, but referred to as HVE_mn.
# Typo: there should be a log within the summation.
n = X.shape[-1]
term1 = 1/(n-m) * np.sum(np.log((n+1)/m * (X[..., m:] - X[..., :-m])),
axis=-1)
k = np.arange(m, n+1)
return term1 + np.sum(1/k) + np.log(m) - np.log(n+1)
|
15,349 | def library_payload(roon_server, zone_id, media_content_id):
"""Create response payload for the library."""
opts = {
"hierarchy": "browse",
"zone_or_output_id": zone_id,
"count": ITEM_LIMIT,
}
# Roon starts browsing for a zone where it left off - so start from the top unless otherwise specified
if media_content_id is None or media_content_id == "Explore":
opts["pop_all"] = True
content_id = "Explore"
else:
opts["item_key"] = media_content_id
content_id = media_content_id
result_header = roon_server.roonapi.browse_browse(opts)
_LOGGER.debug("Result_header %s", result_header)
header = result_header["list"]
title = header.get("title")
subtitle = header.get("subtitle")
if subtitle is None:
list_title = title
else:
list_title = f"{title} ({subtitle})"
total_count = header["count"]
library_image_id = header.get("image_key")
library_info = BrowseMedia(
title=list_title,
media_content_id=content_id,
media_content_type="library",
media_class=MEDIA_CLASS_DIRECTORY,
can_play=False,
can_expand=True,
children=[],
)
result_detail = roon_server.roonapi.browse_load(opts)
_LOGGER.debug("result_detail %s", result_detail)
items = result_detail["items"]
count = len(items)
if count < total_count:
_LOGGER.debug(
"Exceeded limit of %d, loaded %d/%d", ITEM_LIMIT, count, total_count
)
for item in items:
if item.get("title") in EXCLUDE_ITEMS:
continue
entry = item_payload(roon_server, item, library_image_id)
library_info.children.append(entry)
return library_info
| def library_payload(roon_server, zone_id, media_content_id):
"""Create response payload for the library."""
opts = {
"hierarchy": "browse",
"zone_or_output_id": zone_id,
"count": ITEM_LIMIT,
}
# Roon starts browsing for a zone where it left off - so start from the top unless otherwise specified
if media_content_id is None or media_content_id == "Explore":
opts["pop_all"] = True
content_id = "Explore"
else:
opts["item_key"] = media_content_id
content_id = media_content_id
result_header = roon_server.roonapi.browse_browse(opts)
_LOGGER.debug("Result header %s", result_header)
header = result_header["list"]
title = header.get("title")
subtitle = header.get("subtitle")
if subtitle is None:
list_title = title
else:
list_title = f"{title} ({subtitle})"
total_count = header["count"]
library_image_id = header.get("image_key")
library_info = BrowseMedia(
title=list_title,
media_content_id=content_id,
media_content_type="library",
media_class=MEDIA_CLASS_DIRECTORY,
can_play=False,
can_expand=True,
children=[],
)
result_detail = roon_server.roonapi.browse_load(opts)
_LOGGER.debug("result_detail %s", result_detail)
items = result_detail["items"]
count = len(items)
if count < total_count:
_LOGGER.debug(
"Exceeded limit of %d, loaded %d/%d", ITEM_LIMIT, count, total_count
)
for item in items:
if item.get("title") in EXCLUDE_ITEMS:
continue
entry = item_payload(roon_server, item, library_image_id)
library_info.children.append(entry)
return library_info
|
24,479 | def initialize_instance(values, **kwargs):
# TODO: remove when deprecation is finalized https://github.com/DataDog/integrations-core/pull/9340
if 'username' not in values and 'user' in values:
values['username'] = values['user']
_validate_authenticator_option(values)
if 'private_key_password' in values and 'private_key_path' not in values:
raise Exception(
'Option `private_key_password` is set but not option `private_key_path`. '
'Set `private_key_path` or remove `private_key_password` entry.'
)
if values.get('only_custom_queries', False) and len(values('metric_groups', [])) > 0:
raise Exception(
'Option `only_custom_queries` and `metric_groups` are not compatible.'
'`only_custom_queries` prevents `metric_groups` to be collected.'
)
return values
| def initialize_instance(values, **kwargs):
# TODO: remove when deprecation is finalized https://github.com/DataDog/integrations-core/pull/9340
if 'username' not in values and 'user' in values:
values['username'] = values['user']
_validate_authenticator_option(values)
if 'private_key_password' in values and 'private_key_path' not in values:
raise Exception(
'Option `private_key_password` is set but not option `private_key_path`. '
'Set `private_key_path` or remove `private_key_password` entry.'
)
if values.get('only_custom_queries', False) and len(values('metric_groups', [])) > 0:
raise ConfigurationError(
'Option `only_custom_queries` and `metric_groups` are not compatible.'
'`only_custom_queries` prevents `metric_groups` to be collected.'
)
return values
|
15,817 | def _first_ip_nexthop_from_route(routes: Iterable) -> None | str:
"""Find the first RTA_PREFSRC in the routes."""
_LOGGER.debug("Routes: %s", routes)
for route in routes:
for key, value in route["attrs"]:
if key == "RTA_PREFSRC":
return cast(str, value)
return None
| def _first_ip_nexthop_from_route(routes: Iterable) -> str | None:
"""Find the first RTA_PREFSRC in the routes."""
_LOGGER.debug("Routes: %s", routes)
for route in routes:
for key, value in route["attrs"]:
if key == "RTA_PREFSRC":
return cast(str, value)
return None
|
41,225 | def state_vector_to_probabilities(state_vector: 'cirq.STATE_VECTOR_LIKE') -> np.ndarray:
normalized_vector = format(state_vector)
return np.abs(normalized_vector) ** 2
| def state_vector_to_probabilities(state_vector: 'cirq.STATE_VECTOR_LIKE') -> np.ndarray:
normalized_vector = format(state_vector)
return np.abs(valid_state_vector) ** 2
|
3,167 | def var(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: int | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(
np.var, values=values, mask=mask, skipna=skipna, axis=axis, **{"ddof": ddof}
)
| def var(
values: np.ndarray,
mask: npt.NDArray[np.bool_],
*,
skipna: bool = True,
axis: int | None = None,
ddof: int = 1,
):
if not values.size or mask.all():
return libmissing.NA
return _reductions(
np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
)
|
56,696 | def get_language_name(code):
lang = web.ctx.site.get('/languages/' + code)
return lang.name if lang else "'%s' unknown" % code
| def read_author_facet(author_facet: str) -> tuple[str, str]:
key, name = author_facet.split(' ', 1)
return key, name
"""
>>> read_author_facet("OL26783A Leo Tolstoy")
('OL26783A', 'Leo Tolstoy')
"""
key, name = author_facet.strip().split(' ', 1)
lang = web.ctx.site.get('/languages/' + code)
return lang.name if lang else "'%s' unknown" % code
|
56,435 | def main(): # noqa: C901, CCR001
"""Run the main code of the program."""
try:
# arg parsing
parser = argparse.ArgumentParser(
prog=appcmdname,
description='Prints the current system and station (if docked) to stdout and optionally writes player '
'status, ship locations, ship loadout and/or station data to file. '
'Requires prior setup through the accompanying GUI app.'
)
parser.add_argument('-v', '--version', help='print program version and exit', action='store_const', const=True)
group_loglevel = parser.add_mutually_exclusive_group()
group_loglevel.add_argument('--loglevel',
metavar='loglevel',
help='Set the logging loglevel to one of: '
'CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE',
)
parser.add_argument(
'--trace',
help='Set the Debug logging loglevel to TRACE',
action='store_true',
)
parser.add_argument(
'--trace-on',
help='Mark the selected trace logging as active. "*" or "all" is equivalent to --trace-all',
action='append',
)
parser.add_argument(
"--trace-all",
help='Force trace level logging, with all possible --trace-on values active.',
action='store_true'
)
parser.add_argument('-a', metavar='FILE', help='write ship loadout to FILE in Companion API json format')
parser.add_argument('-e', metavar='FILE', help='write ship loadout to FILE in E:D Shipyard plain text format')
parser.add_argument('-l', metavar='FILE', help='write ship locations to FILE in CSV format')
parser.add_argument('-m', metavar='FILE', help='write station commodity market data to FILE in CSV format')
parser.add_argument('-o', metavar='FILE', help='write station outfitting data to FILE in CSV format')
parser.add_argument('-s', metavar='FILE', help='write station shipyard data to FILE in CSV format')
parser.add_argument('-t', metavar='FILE', help='write player status to FILE in CSV format')
parser.add_argument('-d', metavar='FILE', help='write raw JSON data to FILE')
parser.add_argument('-n', action='store_true', help='send data to EDDN')
parser.add_argument('-p', metavar='CMDR', help='Returns data from the specified player account')
parser.add_argument('-j', help=argparse.SUPPRESS) # Import JSON dump
args = parser.parse_args()
if args.version:
updater = Updater(provider='internal')
newversion: Optional[EDMCVersion] = updater.check_appcast()
if newversion:
print(f'{appversion()} ({newversion.title!r} is available)')
else:
print(appversion())
return
level_to_set: Optional[int] = None
if args.trace or args.trace_on:
level_to_set = logging.TRACE # type: ignore # it exists
logger.info('Setting TRACE level debugging due to either --trace or a --trace-on')
if args.trace_all or (args.trace_on and ('*' in args.trace_on or 'all' in args.trace_on)):
level_to_set = logging.TRACE_ALL # type: ignore # it exists
logger.info('Setting TRACE_ALL level debugging due to either --trace-all or a --trace-on *|all')
if level_to_set is not None:
logger.setLevel(level_to_set)
edmclogger.set_channels_loglevel(level_to_set)
elif args.loglevel:
if args.loglevel not in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE'):
print('loglevel must be one of: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE', file=sys.stderr)
sys.exit(EXIT_ARGS)
edmclogger.set_channels_loglevel(args.loglevel)
logger.debug(f'Startup v{appversion()} : Running on Python v{sys.version}')
logger.debug(f'''Platform: {sys.platform}
argv[0]: {sys.argv[0]}
exec_prefix: {sys.exec_prefix}
executable: {sys.executable}
sys.path: {sys.path}'''
)
if args.trace_on and len(args.trace_on) > 0:
import config as conf_module
conf_module.trace_on = [x.casefold() for x in args.trace_on] # duplicate the list just in case
for d in conf_module.trace_on:
logger.info(f'marked {d} for TRACE')
log_locale('Initial Locale')
if args.j:
logger.debug('Import and collate from JSON dump')
# Import and collate from JSON dump
#
# Try twice, once with the system locale and once enforcing utf-8. If the file was made on the current
# system, chances are its the current locale, and not utf-8. Otherwise if it was copied, its probably
# utf8. Either way, try the system FIRST because reading something like cp1251 in UTF-8 results in garbage
# but the reverse results in an exception.
try:
data = json.load(open(args.j))
except UnicodeDecodeError:
data = json.load(open(args.j, encoding='utf-8'))
config.set('querytime', int(getmtime(args.j)))
else:
# Get state from latest Journal file
logger.debug('Getting state from latest journal file')
try:
monitor.currentdir = config.get_str('journaldir', default=config.default_journal_dir)
if not monitor.currentdir:
monitor.currentdir = config.default_journal_dir
logger.debug(f'logdir = "{monitor.currentdir}"')
logfiles = sorted((x for x in os.listdir(monitor.currentdir) if JOURNAL_RE.search(x)),
key=lambda x: x.split('.')[1:])
logfile = join(monitor.currentdir, logfiles[-1])
logger.debug(f'Using logfile "{logfile}"')
with open(logfile, 'r', encoding='utf-8') as loghandle:
for line in loghandle:
try:
monitor.parse_entry(line)
except Exception:
logger.debug(f'Invalid journal entry {line!r}')
except Exception:
logger.exception("Can't read Journal file")
sys.exit(EXIT_SYS_ERR)
if not monitor.cmdr:
logger.error('Not available while E:D is at the main menu')
sys.exit(EXIT_SYS_ERR)
# Get data from Companion API
if args.p:
logger.debug(f'Attempting to use commander "{args.p}"')
cmdrs = config.get_list('cmdrs', default=[])
if args.p in cmdrs:
idx = cmdrs.index(args.p)
else:
for idx, cmdr in enumerate(cmdrs):
if cmdr.lower() == args.p.lower():
break
else:
raise companion.CredentialsError()
companion.session.login(cmdrs[idx], monitor.is_beta)
else:
logger.debug(f'Attempting to use commander "{monitor.cmdr}" from Journal File')
cmdrs = config.get_list('cmdrs', default=[])
if monitor.cmdr not in cmdrs:
raise companion.CredentialsError()
companion.session.login(monitor.cmdr, monitor.is_beta)
# Set up the response queue
capi_response_queue: Queue = Queue()
companion.session.set_capi_response_queue(capi_response_queue)
# Initiate CAPI queries
querytime = int(time())
companion.session.station(query_time=querytime)
# Wait for the response
_capi_request_timeout = 60
try:
capi_response = capi_response_queue.get(block=True, timeout=_capi_request_timeout)
except queue.Empty:
logger.error(f'CAPI requests timed out after {_capi_request_timeout} seconds')
sys.exit(EXIT_SERVER)
else:
if isinstance(capi_response, companion.CAPIFailedRequest):
logger.trace_if('capi.worker', f'Failed Request: {capi_response.message}')
if capi_response.exception:
raise capi_response.exception
else:
raise ValueError(capi_response.message)
logger.trace_if('capi.worker', 'Answer is not a Failure')
if not isinstance(capi_response, companion.EDMCCAPIResponse):
msg = f"Response was neither CAPIFailedRequest nor EDMCAPIResponse: {type(capi_response)}"
logger.error(msg)
raise ValueError(msg)
data = capi_response.capi_data
config.set('querytime', querytime)
# Validation
if not deep_get(data, 'commander', 'name', default='').strip():
logger.error("No data['command']['name'] from CAPI")
sys.exit(EXIT_SERVER)
elif not deep_get(data, 'lastSystem', 'name') or \
data['commander'].get('docked') and not \
deep_get(data, 'lastStarport', 'name'): # Only care if docked
logger.error("No data['lastSystem']['name'] from CAPI")
sys.exit(EXIT_SERVER)
elif not deep_get(data, 'ship', 'modules') or not deep_get(data, 'ship', 'name', default=''):
logger.error("No data['ship']['modules'] from CAPI")
sys.exit(EXIT_SERVER)
elif args.j:
pass # Skip further validation
elif data['commander']['name'] != monitor.cmdr:
logger.error(f'Commander "{data["commander"]["name"]}" from CAPI doesn\'t match "{monitor.cmdr}" from Journal') # noqa: E501
sys.exit(EXIT_CREDENTIALS)
elif data['lastSystem']['name'] != monitor.system or \
((data['commander']['docked'] and data['lastStarport']['name'] or None) != monitor.station) or \
data['ship']['id'] != monitor.state['ShipID'] or \
data['ship']['name'].lower() != monitor.state['ShipType']:
logger.error('Mismatch(es) between CAPI and Journal for at least one of: StarSystem, Last Star Port, Ship ID or Ship Name/Type') # noqa: E501
sys.exit(EXIT_LAGGING)
# stuff we can do when not docked
if args.d:
logger.debug(f'Writing raw JSON data to "{args.d}"')
out = json.dumps(dict(data), ensure_ascii=False, indent=2, sort_keys=True, separators=(',', ': '))
with open(args.d, 'wb') as f:
f.write(out.encode("utf-8"))
if args.a:
logger.debug(f'Writing Ship Loadout in Companion API JSON format to "{args.a}"')
loadout.export(data, args.a)
if args.e:
logger.debug(f'Writing Ship Loadout in ED Shipyard plain text format to "{args.e}"')
edshipyard.export(data, args.e)
if args.l:
logger.debug(f'Writing Ship Locations in CSV format to "{args.l}"')
stats.export_ships(data, args.l)
if args.t:
logger.debug(f'Writing Player Status in CSV format to "{args.t}"')
stats.export_status(data, args.t)
if data['commander'].get('docked'):
print(f'{deep_get(data, "lastSystem", "name", default="Unknown")},'
f'{deep_get(data, "lastStarport", "name", default="Unknown")}'
)
else:
print(deep_get(data, 'lastSystem', 'name', default='Unknown'))
if (args.m or args.o or args.s or args.n or args.j):
if not data['commander'].get('docked'):
logger.error("Can't use -m, -o, -s, -n or -j because you're not currently docked!")
return
elif not deep_get(data, 'lastStarport', 'name'):
logger.error("No data['lastStarport']['name'] from CAPI")
sys.exit(EXIT_LAGGING)
# Ignore possibly missing shipyard info
elif not (data['lastStarport'].get('commodities') or data['lastStarport'].get('modules')):
logger.error("No commodities or outfitting (modules) in CAPI data")
return
else:
return
# Finally - the data looks sane and we're docked at a station
if args.j:
logger.debug('Importing data from the CAPI return...')
# Collate from JSON dump
collate.addcommodities(data)
collate.addmodules(data)
collate.addships(data)
if args.m:
logger.debug(f'Writing Station Commodity Market Data in CSV format to "{args.m}"')
if data['lastStarport'].get('commodities'):
# Fixup anomalies in the commodity data
fixed = companion.fixup(data)
commodity.export(fixed, COMMODITY_DEFAULT, args.m)
else:
logger.error("Station doesn't have a market")
if args.o:
if data['lastStarport'].get('modules'):
logger.debug(f'Writing Station Outfitting in CSV format to "{args.o}"')
outfitting.export(data, args.o)
else:
logger.error("Station doesn't supply outfitting")
if (args.s or args.n) and not args.j and not \
data['lastStarport'].get('ships') and data['lastStarport']['services'].get('shipyard'):
# Retry for shipyard
sleep(SERVER_RETRY)
new_data = companion.session.station()
# might have undocked while we were waiting for retry in which case station data is unreliable
if new_data['commander'].get('docked') and \
deep_get(new_data, 'lastSystem', 'name') == monitor.system and \
deep_get(new_data, 'lastStarport', 'name') == monitor.station:
data = new_data
if args.s:
if deep_get(data, 'lastStarport', 'ships', 'shipyard_list'):
logger.debug(f'Writing Station Shipyard in CSV format to "{args.s}"')
shipyard.export(data, args.s)
elif not args.j and monitor.stationservices and 'Shipyard' in monitor.stationservices:
logger.error('Failed to get shipyard data')
else:
logger.error("Station doesn't have a shipyard")
if args.n:
try:
eddn_sender = eddn.EDDN(None)
logger.debug('Sending Market, Outfitting and Shipyard data to EDDN...')
eddn_sender.export_commodities(data, monitor.is_beta, monitor.state['Odyssey'])
eddn_sender.export_outfitting(data, monitor.is_beta, monitor.state['Odyssey'])
eddn_sender.export_shipyard(data, monitor.is_beta, monitor.state['Odyssey'])
except Exception:
logger.exception('Failed to send data to EDDN')
except companion.ServerError:
logger.error('Frontier CAPI Server returned an error')
sys.exit(EXIT_SERVER)
except companion.CredentialsError:
logger.error('Frontier CAPI Server: Invalid Credentials')
sys.exit(EXIT_CREDENTIALS)
| def main(): # noqa: C901, CCR001
"""Run the main code of the program."""
try:
# arg parsing
parser = argparse.ArgumentParser(
prog=appcmdname,
description='Prints the current system and station (if docked) to stdout and optionally writes player '
'status, ship locations, ship loadout and/or station data to file. '
'Requires prior setup through the accompanying GUI app.'
)
parser.add_argument('-v', '--version', help='print program version and exit', action='store_const', const=True)
group_loglevel = parser.add_mutually_exclusive_group()
group_loglevel.add_argument('--loglevel',
metavar='loglevel',
help='Set the logging loglevel to one of: '
'CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE',
)
parser.add_argument(
'--trace',
help='Set the Debug logging loglevel to TRACE',
action='store_true',
)
parser.add_argument(
'--trace-on',
help='Mark the selected trace logging as active. "*" or "all" is equivalent to --trace-all',
action='append',
)
parser.add_argument(
"--trace-all",
help='Force trace level logging, with all possible --trace-on values active.',
action='store_true'
)
parser.add_argument('-a', metavar='FILE', help='write ship loadout to FILE in Companion API json format')
parser.add_argument('-e', metavar='FILE', help='write ship loadout to FILE in E:D Shipyard plain text format')
parser.add_argument('-l', metavar='FILE', help='write ship locations to FILE in CSV format')
parser.add_argument('-m', metavar='FILE', help='write station commodity market data to FILE in CSV format')
parser.add_argument('-o', metavar='FILE', help='write station outfitting data to FILE in CSV format')
parser.add_argument('-s', metavar='FILE', help='write station shipyard data to FILE in CSV format')
parser.add_argument('-t', metavar='FILE', help='write player status to FILE in CSV format')
parser.add_argument('-d', metavar='FILE', help='write raw JSON data to FILE')
parser.add_argument('-n', action='store_true', help='send data to EDDN')
parser.add_argument('-p', metavar='CMDR', help='Returns data from the specified player account')
parser.add_argument('-j', help=argparse.SUPPRESS) # Import JSON dump
args = parser.parse_args()
if args.version:
updater = Updater(provider='internal')
newversion: Optional[EDMCVersion] = updater.check_appcast()
if newversion:
print(f'{appversion()} ({newversion.title!r} is available)')
else:
print(appversion())
return
level_to_set: Optional[int] = None
if args.trace or args.trace_on:
level_to_set = logging.TRACE # type: ignore # it exists
logger.info('Setting TRACE level debugging due to either --trace or a --trace-on')
if args.trace_all or (args.trace_on and ('*' in args.trace_on or 'all' in args.trace_on)):
level_to_set = logging.TRACE_ALL # type: ignore # it exists
logger.info('Setting TRACE_ALL level debugging due to either --trace-all or a --trace-on *|all')
if level_to_set is not None:
logger.setLevel(level_to_set)
edmclogger.set_channels_loglevel(level_to_set)
elif args.loglevel:
if args.loglevel not in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE'):
print('loglevel must be one of: CRITICAL, ERROR, WARNING, INFO, DEBUG, TRACE', file=sys.stderr)
sys.exit(EXIT_ARGS)
edmclogger.set_channels_loglevel(args.loglevel)
logger.debug(f'Startup v{appversion()} : Running on Python v{sys.version}')
logger.debug(f'''Platform: {sys.platform}
argv[0]: {sys.argv[0]}
exec_prefix: {sys.exec_prefix}
executable: {sys.executable}
sys.path: {sys.path}'''
)
if args.trace_on and len(args.trace_on) > 0:
import config as conf_module
conf_module.trace_on = [x.casefold() for x in args.trace_on] # duplicate the list just in case
for d in conf_module.trace_on:
logger.info(f'marked {d} for TRACE')
log_locale('Initial Locale')
if args.j:
logger.debug('Import and collate from JSON dump')
# Import and collate from JSON dump
#
# Try twice, once with the system locale and once enforcing utf-8. If the file was made on the current
# system, chances are its the current locale, and not utf-8. Otherwise if it was copied, its probably
# utf8. Either way, try the system FIRST because reading something like cp1251 in UTF-8 results in garbage
# but the reverse results in an exception.
try:
data = json.load(open(args.j))
except UnicodeDecodeError:
data = json.load(open(args.j, encoding='utf-8'))
config.set('querytime', int(getmtime(args.j)))
else:
# Get state from latest Journal file
logger.debug('Getting state from latest journal file')
try:
monitor.currentdir = config.get_str('journaldir', default=config.default_journal_dir)
if not monitor.currentdir:
monitor.currentdir = config.default_journal_dir
logger.debug(f'logdir = "{monitor.currentdir}"')
logfiles = sorted((x for x in os.listdir(monitor.currentdir) if JOURNAL_RE.search(x)),
key=lambda x: x.split('.')[1:])
logfile = join(monitor.currentdir, logfiles[-1])
logger.debug(f'Using logfile "{logfile}"')
with open(logfile, 'r', encoding='utf-8') as loghandle:
for line in loghandle:
try:
monitor.parse_entry(line)
except Exception:
logger.debug(f'Invalid journal entry {line!r}')
except Exception:
logger.exception("Can't read Journal file")
sys.exit(EXIT_SYS_ERR)
if not monitor.cmdr:
logger.error('Not available while E:D is at the main menu')
sys.exit(EXIT_SYS_ERR)
# Get data from Companion API
if args.p:
logger.debug(f'Attempting to use commander "{args.p}"')
cmdrs = config.get_list('cmdrs', default=[])
if args.p in cmdrs:
idx = cmdrs.index(args.p)
else:
for idx, cmdr in enumerate(cmdrs):
if cmdr.lower() == args.p.lower():
break
else:
raise companion.CredentialsError()
companion.session.login(cmdrs[idx], monitor.is_beta)
else:
logger.debug(f'Attempting to use commander "{monitor.cmdr}" from Journal File')
cmdrs = config.get_list('cmdrs', default=[])
if monitor.cmdr not in cmdrs:
raise companion.CredentialsError()
companion.session.login(monitor.cmdr, monitor.is_beta)
# Set up the response queue
capi_response_queue: Queue = Queue()
companion.session.set_capi_response_queue(capi_response_queue)
# Initiate CAPI queries
querytime = int(time())
companion.session.station(query_time=querytime)
# Wait for the response
_capi_request_timeout = 60
try:
capi_response = capi_response_queue.get(block=True, timeout=_capi_request_timeout)
except queue.Empty:
logger.error(f'CAPI requests timed out after {_capi_request_timeout} seconds')
sys.exit(EXIT_SERVER)
else:
if isinstance(capi_response, companion.CAPIFailedRequest):
logger.trace_if('capi.worker', f'Failed Request: {capi_response.message}')
if capi_response.exception:
raise capi_response.exception
else:
raise ValueError(capi_response.message)
logger.trace_if('capi.worker', 'Answer is not a Failure')
if not isinstance(capi_response, companion.EDMCCAPIResponse):
msg = f"Response was neither CAPIFailedRequest nor EDMCAPIResponse: {type(capi_response)}"
logger.error(msg)
raise ValueError(msg)
data = capi_response.capi_data
config.set('querytime', querytime)
# Validation
if not deep_get(data, 'commander', 'name', default='').strip():
logger.error("No data['command']['name'] from CAPI")
sys.exit(EXIT_SERVER)
elif not deep_get(data, 'lastSystem', 'name') or \
data['commander'].get('docked') and not \
deep_get(data, 'lastStarport', 'name'): # Only care if docked
logger.error("No data['lastSystem']['name'] from CAPI")
sys.exit(EXIT_SERVER)
elif not deep_get(data, 'ship', 'modules') or not deep_get(data, 'ship', 'name', default=''):
logger.error("No data['ship']['modules'] from CAPI")
sys.exit(EXIT_SERVER)
elif args.j:
pass # Skip further validation
elif data['commander']['name'] != monitor.cmdr:
logger.error(f'Commander "{data["commander"]["name"]}" from CAPI doesn\'t match "{monitor.cmdr}" from Journal') # noqa: E501
sys.exit(EXIT_CREDENTIALS)
elif data['lastSystem']['name'] != monitor.system or \
((data['commander']['docked'] and data['lastStarport']['name'] or None) != monitor.station) or \
data['ship']['id'] != monitor.state['ShipID'] or \
data['ship']['name'].lower() != monitor.state['ShipType']:
logger.error('Mismatch(es) between CAPI and Journal for at least one of: StarSystem, Last Star Port, Ship ID or Ship Name/Type') # noqa: E501
sys.exit(EXIT_LAGGING)
# stuff we can do when not docked
if args.d:
logger.debug(f'Writing raw JSON data to "{args.d}"')
out = json.dumps(dict(data), ensure_ascii=False, indent=2, sort_keys=True, separators=(',', ': '))
with open(args.d, 'wb') as f:
f.write(out.encode("utf-8"))
if args.a:
logger.debug(f'Writing Ship Loadout in Companion API JSON format to "{args.a}"')
loadout.export(data, args.a)
if args.e:
logger.debug(f'Writing Ship Loadout in ED Shipyard plain text format to "{args.e}"')
edshipyard.export(data, args.e)
if args.l:
logger.debug(f'Writing Ship Locations in CSV format to "{args.l}"')
stats.export_ships(data, args.l)
if args.t:
logger.debug(f'Writing Player Status in CSV format to "{args.t}"')
stats.export_status(data, args.t)
if data['commander'].get('docked'):
print(f'{deep_get(data, "lastSystem", "name", default="Unknown")},'
f'{deep_get(data, "lastStarport", "name", default="Unknown")}'
)
else:
print(deep_get(data, 'lastSystem', 'name', default='Unknown'))
if (args.m or args.o or args.s or args.n or args.j):
if not data['commander'].get('docked'):
logger.error("Can't use -m, -o, -s, -n or -j because you're not currently docked!")
return
elif not deep_get(data, 'lastStarport', 'name'):
logger.error("No data['lastStarport']['name'] from CAPI")
sys.exit(EXIT_LAGGING)
# Ignore possibly missing shipyard info
elif not (data['lastStarport'].get('commodities') or data['lastStarport'].get('modules')):
logger.error("No commodities or outfitting (modules) in CAPI data")
return
else:
return
# Finally - the data looks sane and we're docked at a station
if args.j:
logger.debug('Importing data from the CAPI return...')
# Collate from JSON dump
collate.addcommodities(data)
collate.addmodules(data)
collate.addships(data)
if args.m:
logger.debug(f'Writing Station Commodity Market Data in CSV format to "{args.m}"')
if data['lastStarport'].get('commodities'):
# Fixup anomalies in the commodity data
fixed = companion.fixup(data)
commodity.export(fixed, COMMODITY_DEFAULT, args.m)
else:
logger.error("Station doesn't have a market")
if args.o:
if data['lastStarport'].get('modules'):
logger.debug(f'Writing Station Outfitting in CSV format to "{args.o}"')
outfitting.export(data, args.o)
else:
logger.error("Station doesn't supply outfitting")
if (args.s or args.n) and not args.j and not \
data['lastStarport'].get('ships') and data['lastStarport']['services'].get('shipyard'):
# Retry for shipyard
sleep(SERVER_RETRY)
new_data = companion.session.station()
# might have undocked while we were waiting for retry in which case station data is unreliable
if new_data['commander'].get('docked') and \
deep_get(new_data, 'lastSystem', 'name') == monitor.system and \
deep_get(new_data, 'lastStarport', 'name') == monitor.station:
data = new_data
if args.s:
if deep_get(data, 'lastStarport', 'ships', 'shipyard_list'):
logger.debug(f'Writing Station Shipyard in CSV format to "{args.s}"')
shipyard.export(data, args.s)
elif not args.j and monitor.stationservices and 'Shipyard' in monitor.stationservices:
logger.error('Failed to get shipyard data')
else:
logger.error("Station doesn't have a shipyard")
if args.n:
try:
eddn_sender = eddn.EDDN(None)
logger.debug('Sending Market, Outfitting and Shipyard data to EDDN...')
eddn_sender.export_commodities(data, monitor.is_beta, monitor.state['Odyssey'])
eddn_sender.export_outfitting(data, monitor.is_beta, monitor.state['Odyssey'])
eddn_sender.export_shipyard(data, monitor.is_beta, monitor.state['Odyssey'])
except Exception:
logger.exception('Failed to send data to EDDN')
except companion.ServerError:
logger.error('Frontier CAPI Server returned an error')
sys.exit(EXIT_SERVER)
except companion.CredentialsError:
logger.error('Frontier CAPI Server: Invalid Credentials')
sys.exit(EXIT_CREDENTIALS)
|
14,628 | def compute_evaluation_metrics(metrics,
labels,
predictions,
model_type,
label_dict=None,
grid_objective=None,
probability=False,
logger=None):
"""
Compute given metrics to evaluate the given predictions generated
by the given type of estimator against the given true labels.
Parameters
----------
metrics : list of str
List of metrics to compute.
labels : array-like
True labels to be used for computing the metrics.
predictions : array-like
The predictions to be used for computing the metrics.
model_type : str
One of "classifier" or "regressor".
label_dict : dict, optional
Dictionary mapping class labels to indices for classification.
Defaults to ``None``.
grid_objective : str, optional
The objective used for tuning the hyper-parameters of the model
that generated the predictions. If ``None``, it means that no
grid search was done.
Defaults to ``None``.
probability : bool, optional
Does the model output class probabilities?
Defaults to ``False``.
logger : logging.Logger, optional
A logger instance to use for logging messages and warnings.
If ``None``, a new one is created.
Defaults to ``None``.
Returns
-------
res : 5-tuple
The confusion matrix, the overall accuracy, the per-label
PRFs, the grid search objective function score, and the
additional evaluation metrics, if any. For regressors, the
first two elements are ``None``.
"""
# set up the logger
logger = logger if logger else logging.getLogger(__name__)
# warn if grid objective was also specified in metrics
if len(metrics) > 0 and grid_objective in metrics:
logger.warning(f"The grid objective '{grid_objective}' is also "
"specified as an evaluation metric. Since its value "
"is already included in the results as the objective "
"score, it will not be printed again in the list of "
"metrics.")
metrics = [metric for metric in metrics if metric != grid_objective]
# initialize a dictionary that will hold all of the metric scores
metric_scores = {metric: None for metric in metrics}
# if we are doing classification and are a probablistic
# learner or a soft-voting meta learner, then `yhat` are
# probabilities so we need to compute the class indices
# separately and save them too
if model_type == 'classifier' and probability:
class_probs = predictions
predictions = np.argmax(class_probs, axis=1)
# if we are a regressor or classifier not in probability
# mode, then we have the class indices already and there
# are no probabilities
else:
class_probs = None
# make a single list of metrics including the grid objective
# since it's easier to compute everything together
metrics_to_compute = [grid_objective] + metrics
for metric in metrics_to_compute:
# skip the None if we are not doing grid search
if not metric:
continue
# CASE 1: in probability mode for classification which means we
# need to either use the probabilities directly or infer the labels
# from them depending on the metric
if probability:
# there are three possible cases here:
# (a) if we are using a correlation metric or
# `average_precision` or `roc_auc` in a binary
# classification scenario, then we need to explicitly
# pass in the probabilities of the positive class.
# (b) if we are using `neg_log_loss`, then we
# just pass in the full probability array
# (c) we compute the most likely labels from the
# probabilities via argmax and use those
# for all other metrics
if (len(label_dict) == 2 and
(metric in CORRELATION_METRICS or
metric in ['average_precision', 'roc_auc']) and
metric != grid_objective):
logger.info("using probabilities for the positive class to "
f"compute '{metric}' for evaluation.")
preds_for_metric = class_probs[:, 1]
elif metric == 'neg_log_loss':
preds_for_metric = class_probs
else:
preds_for_metric = predictions
# CASE 2: no probability mode for classifier or regressor
# in which case we just use the predictions as they are
else:
preds_for_metric = predictions
try:
metric_scores[metric] = use_score_func(metric, labels, preds_for_metric)
except ValueError:
metric_scores[metric] = float('NaN')
# now separate out the grid objective score from the additional metric scores
# if a grid objective was actually passed in. If no objective was passed in
# then that score should just be none.
objective_score = None
additional_scores = metric_scores.copy()
if grid_objective:
objective_score = metric_scores[grid_objective]
del additional_scores[grid_objective]
# compute some basic statistics for regressors
if model_type == 'regressor':
result_dict = {'descriptive': defaultdict(dict)}
for table_label, y in zip(['actual', 'predicted'], [labels, predictions]):
result_dict['descriptive'][table_label]['min'] = min(y)
result_dict['descriptive'][table_label]['max'] = max(y)
result_dict['descriptive'][table_label]['avg'] = np.mean(y)
result_dict['descriptive'][table_label]['std'] = np.std(y)
result_dict['pearson'] = use_score_func('pearson', labels, predictions)
res = (None, None, result_dict, objective_score, additional_scores)
else:
# compute the confusion matrix and precision/recall/f1
# note that we are using the class indices here
# and not the actual class labels themselves
num_labels = len(label_dict)
conf_mat = confusion_matrix(labels,
predictions,
labels=list(range(num_labels)))
# Calculate metrics
overall_accuracy = accuracy_score(labels, predictions)
result_matrix = precision_recall_fscore_support(
labels, predictions, labels=list(range(num_labels)), average=None)
# Store results
result_dict = defaultdict(dict)
for actual_label in sorted(label_dict):
col = label_dict[actual_label]
result_dict[actual_label]["Precision"] = result_matrix[0][col]
result_dict[actual_label]["Recall"] = result_matrix[1][col]
result_dict[actual_label]["F-measure"] = result_matrix[2][col]
res = (conf_mat.tolist(), overall_accuracy, result_dict,
objective_score, additional_scores)
return res
| def compute_evaluation_metrics(metrics,
labels,
predictions,
model_type,
label_dict=None,
grid_objective=None,
probability=False,
logger=None):
"""
Compute given metrics to evaluate the given predictions generated
by the given type of estimator against the given true labels.
Parameters
----------
metrics : list of str
List of metrics to compute.
labels : array-like
True labels to be used for computing the metrics.
predictions : array-like
The predictions to be used for computing the metrics.
model_type : str
One of "classifier" or "regressor".
label_dict : dict, optional
Dictionary mapping class labels to indices for classification.
Defaults to ``None``.
grid_objective : str, optional
The objective used for tuning the hyper-parameters of the model
that generated the predictions. If ``None``, it means that no
grid search was done.
Defaults to ``None``.
probability : bool, optional
Does the model output class probabilities?
Defaults to ``False``.
logger : logging.Logger, optional
A logger instance to use for logging messages and warnings.
If ``None``, a new one is created.
Defaults to ``None``.
Returns
-------
res : 5-tuple
The confusion matrix, the overall accuracy, the per-label
PRFs, the grid search objective function score, and the
additional evaluation metrics, if any. For regressors, the
first two elements are ``None``.
"""
# set up the logger
logger = logger if logger else logging.getLogger(__name__)
# warn if grid objective was also specified in metrics
if len(metrics) > 0 and grid_objective in metrics:
logger.warning(f"The grid objective '{grid_objective}' is also "
"specified as an evaluation metric. Since its value "
"is already included in the results as the objective "
"score, it will not be printed again in the list of "
"metrics.")
metrics = [metric for metric in metrics if metric != grid_objective]
# initialize a dictionary that will hold all of the metric scores
metric_scores = {metric: None for metric in metrics}
# if we are doing classification and are a probablistic
# learner or a soft-voting meta learner, then `yhat` are
# probabilities so we need to compute the class indices
# separately and save them too
if model_type == 'classifier' and probability:
class_probs = predictions
predictions = np.argmax(class_probs, axis=1)
# if we are a regressor or classifier not in probability
# mode, then we have the class indices already and there
# are no probabilities
else:
class_probs = None
# make a single list of metrics including the grid objective
# since it's easier to compute everything together
metrics_to_compute = [grid_objective] + metrics
for metric in metrics_to_compute:
# skip the None if we are not doing grid search
if not metric:
continue
# CASE 1: in probability mode for classification which means we
# need to either use the probabilities directly or infer the labels
# from them depending on the metric
if probability:
# there are three possible cases here:
# (a) if we are using a correlation metric or
# `average_precision` or `roc_auc` in a binary
# classification scenario, then we need to explicitly
# pass in the probabilities of the positive class.
# (b) if we are using `neg_log_loss`, then we
# just pass in the full probability array
# (c) we compute the most likely labels from the
# probabilities via argmax and use those
# for all other metrics
if (len(label_dict) == 2 and
(metric in CORRELATION_METRICS or
metric in ['average_precision', 'roc_auc']) and
metric != grid_objective):
logger.info("using probabilities for the positive class to "
f"compute '{metric}' for evaluation.")
preds_for_metric = class_probs[:, 1]
elif metric == 'neg_log_loss':
preds_for_metric = class_probs
else:
preds_for_metric = predictions
# CASE 2: no probability mode for classifier or regressor
# in which case we just use the predictions as they are
else:
preds_for_metric = predictions
try:
metric_scores[metric] = use_score_func(metric, labels, preds_for_metric)
except ValueError:
metric_scores[metric] = float('NaN')
# now separate out the grid objective score from the additional metric scores
# if a grid objective was actually passed in. If no objective was passed in
# then that score should just be none.
objective_score = None
additional_scores = metric_scores.copy()
if grid_objective:
objective_score = metric_scores[grid_objective]
del additional_scores[grid_objective]
# compute some basic statistics for regressors
if model_type == 'regressor':
result_dict = {'descriptive': defaultdict(dict)}
for table_label, y in zip(['actual', 'predicted'], [labels, predictions]):
result_dict['descriptive'][table_label]['min'] = min(y)
result_dict['descriptive'][table_label]['max'] = max(y)
result_dict['descriptive'][table_label]['avg'] = np.mean(y)
result_dict['descriptive'][table_label]['std'] = np.std(y)
result_dict['pearson'] = use_score_func('pearson', labels, predictions)
res = (None, None, result_dict, objective_score, additional_scores)
else:
# compute the confusion matrix and precision/recall/f1
# note that we are using the class indices here
# and not the actual class labels themselves
num_labels = len(label_dict)
conf_mat = confusion_matrix(labels,
predictions,
labels=list(range(num_labels)))
# Calculate metrics
overall_accuracy = accuracy_score(labels, predictions)
result_matrix = precision_recall_fscore_support(
labels, predictions, labels=list(range(num_labels)), average=None)
# Store results
result_dict = defaultdict(dict)
for actual_label in sorted(label_dict):
col = label_dict[actual_label]
result_dict[actual_label]["Precision"] = result_matrix[0][col]
result_dict[actual_label]["Recall"] = result_matrix[1][col]
result_dict[actual_label]["F-measure"] = result_matrix[2][col]
res = (conf_mat.tolist(), overall_accuracy, result_dict,
objective_score, additional_scores)
return res
|
23,224 | def import_ivar_by_name(name: str, prefixes: List[str] = [None], last_errors = None) -> Tuple[str, Any, Any, str]:
"""Import an instance variable that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
try:
name, attr = name.rsplit(".", 1)
real_name, obj, parent, modname = import_by_name(name, prefixes)
qualname = real_name.replace(modname + ".", "")
analyzer = ModuleAnalyzer.for_module(getattr(obj, '__module__', modname))
analyzer.analyze()
# check for presence in `annotations` to include dataclass attributes
if (qualname, attr) in analyzer.attr_docs or (qualname, attr) in analyzer.annotations:
return real_name + "." + attr, INSTANCEATTR, obj, modname
except (ImportError, ValueError, PycodeError) as e:
if last_errors is not None:
last_errors.append(str(e.args[0]))
pass
raise ImportError
| def import_ivar_by_name(name: str, prefixes: List[str] = [None], last_errors = None) -> Tuple[str, Any, Any, str]:
"""Import an instance variable that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
try:
name, attr = name.rsplit(".", 1)
real_name, obj, parent, modname = import_by_name(name, prefixes)
qualname = real_name.replace(modname + ".", "")
analyzer = ModuleAnalyzer.for_module(getattr(obj, '__module__', modname))
analyzer.analyze()
# check for presence in `annotations` to include dataclass attributes
if (qualname, attr) in analyzer.attr_docs or (qualname, attr) in analyzer.annotations:
return real_name + "." + attr, INSTANCEATTR, obj, modname
except (ImportError, ValueError, PycodeError) as e:
if last_errors is not None:
last_errors.append(str(e))
pass
raise ImportError
|
35,646 | def resnet152(weights: Optional[ResNet152Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNet152Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNet152Weights.verify(weights)
return _resnet(BasicBlock, [3, 8, 36, 3], weights, progress, **kwargs)
| def resnet152(weights: Optional[ResNet152Weights] = None, progress: bool = True, **kwargs: Any) -> ResNet:
if "pretrained" in kwargs:
warnings.warn("The argument pretrained is deprecated, please use weights instead.")
weights = ResNet152Weights.ImageNet1K_RefV1 if kwargs.pop("pretrained") else None
weights = ResNet152Weights.verify(weights)
return _resnet(Bottleneck, [3, 8, 36, 3], weights, progress, **kwargs)
|
23,676 | def gti_dirint(poa_global, aoi, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, albedo=.25,
model='perez', model_perez='allsitescomposite1990',
calculate_gt_90=True, max_iterations=30):
"""
Determine GHI, DNI, DHI from POA global using the GTI DIRINT model.
The GTI DIRINT model is described in [1]_.
.. warning::
Model performance is poor for AOI greater than approximately
80 degrees `and` plane of array irradiance greater than
approximately 200 W/m^2.
Parameters
----------
poa_global : array-like
Plane of array global irradiance in W/m^2.
aoi : array-like
Angle of incidence of solar rays with respect to the module
surface normal.
solar_zenith : array-like
True (not refraction-corrected) solar zenith angles in decimal
degrees.
solar_azimuth : array-like
Solar azimuth angles in decimal degrees.
times : DatetimeIndex
Time indices for the input array-like data.
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The Azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
pressure : numeric, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
albedo : numeric, default 0.25
Gound surface albedo. [unitless]
model : String, default 'perez'
Irradiance model. See :py:func:`get_sky_diffuse` for allowed values.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`perez`.
calculate_gt_90 : bool, default True
Controls if the algorithm evaluates inputs with AOI >= 90 degrees.
If False, returns nan for AOI >= 90 degrees. Significant speed ups
can be achieved by setting this parameter to False.
max_iterations : int, default 30
Maximum number of iterations for the aoi < 90 deg algorithm.
Returns
-------
data : DataFrame
Contains the following keys/columns:
* ``ghi``: the modeled global horizontal irradiance in W/m^2.
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
References
----------
.. [1] B. Marion, A model for deriving the direct normal and
diffuse horizontal irradiance from the global tilted
irradiance, Solar Energy 122, 1037-1046.
:doi:`10.1016/j.solener.2015.10.024`
"""
aoi_lt_90 = aoi < 90
# for AOI less than 90 degrees
ghi, dni, dhi, kt_prime = _gti_dirint_lt_90(
poa_global, aoi, aoi_lt_90, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime, temp_dew=temp_dew,
albedo=albedo, model=model, model_perez=model_perez,
max_iterations=max_iterations)
# for AOI greater than or equal to 90 degrees
if calculate_gt_90:
ghi_gte_90, dni_gte_90, dhi_gte_90 = _gti_dirint_gte_90(
poa_global, aoi, solar_zenith, solar_azimuth,
surface_tilt, times, kt_prime,
pressure=pressure, temp_dew=temp_dew, albedo=albedo)
else:
ghi_gte_90, dni_gte_90, dhi_gte_90 = np.nan, np.nan, np.nan
# put the AOI < 90 and AOI >= 90 conditions together
output = OrderedDict()
output['ghi'] = ghi.where(aoi_lt_90, ghi_gte_90)
output['dni'] = dni.where(aoi_lt_90, dni_gte_90)
output['dhi'] = dhi.where(aoi_lt_90, dhi_gte_90)
output = pd.DataFrame(output, index=times)
return output
| def gti_dirint(poa_global, aoi, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=101325.,
use_delta_kt_prime=True, temp_dew=None, albedo=.25,
model='perez', model_perez='allsitescomposite1990',
calculate_gt_90=True, max_iterations=30):
"""
Determine GHI, DNI, DHI from POA global using the GTI DIRINT model.
The GTI DIRINT model is described in [1]_.
.. warning::
Model performance is poor for AOI greater than approximately
80 degrees `and` plane of array irradiance greater than
approximately 200 W/m^2.
Parameters
----------
poa_global : array-like
Plane of array global irradiance in W/m^2.
aoi : array-like
Angle of incidence of solar rays with respect to the module
surface normal.
solar_zenith : array-like
True (not refraction-corrected) solar zenith angles in decimal
degrees.
solar_azimuth : array-like
Solar azimuth angles in decimal degrees.
times : DatetimeIndex
Time indices for the input array-like data.
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The Azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
pressure : numeric, default 101325.0
The site pressure in Pascal. Pressure may be measured or an
average pressure may be calculated from site altitude.
use_delta_kt_prime : bool, default True
If True, indicates that the stability index delta_kt_prime is
included in the model. The stability index adjusts the estimated
DNI in response to dynamics in the time series of GHI. It is
recommended that delta_kt_prime is not used if the time between
GHI points is 1.5 hours or greater. If use_delta_kt_prime=True,
input data must be Series.
temp_dew : None, float, or array-like, default None
Surface dew point temperatures, in degrees C. Values of temp_dew
may be numeric or NaN. Any single time period point with a
temp_dew=NaN does not have dew point improvements applied. If
temp_dew is not provided, then dew point improvements are not
applied.
albedo : numeric, default 0.25
Ground surface albedo. [unitless]
model : String, default 'perez'
Irradiance model. See :py:func:`get_sky_diffuse` for allowed values.
model_perez : String, default 'allsitescomposite1990'
Used only if model='perez'. See :py:func:`perez`.
calculate_gt_90 : bool, default True
Controls if the algorithm evaluates inputs with AOI >= 90 degrees.
If False, returns nan for AOI >= 90 degrees. Significant speed ups
can be achieved by setting this parameter to False.
max_iterations : int, default 30
Maximum number of iterations for the aoi < 90 deg algorithm.
Returns
-------
data : DataFrame
Contains the following keys/columns:
* ``ghi``: the modeled global horizontal irradiance in W/m^2.
* ``dni``: the modeled direct normal irradiance in W/m^2.
* ``dhi``: the modeled diffuse horizontal irradiance in
W/m^2.
References
----------
.. [1] B. Marion, A model for deriving the direct normal and
diffuse horizontal irradiance from the global tilted
irradiance, Solar Energy 122, 1037-1046.
:doi:`10.1016/j.solener.2015.10.024`
"""
aoi_lt_90 = aoi < 90
# for AOI less than 90 degrees
ghi, dni, dhi, kt_prime = _gti_dirint_lt_90(
poa_global, aoi, aoi_lt_90, solar_zenith, solar_azimuth, times,
surface_tilt, surface_azimuth, pressure=pressure,
use_delta_kt_prime=use_delta_kt_prime, temp_dew=temp_dew,
albedo=albedo, model=model, model_perez=model_perez,
max_iterations=max_iterations)
# for AOI greater than or equal to 90 degrees
if calculate_gt_90:
ghi_gte_90, dni_gte_90, dhi_gte_90 = _gti_dirint_gte_90(
poa_global, aoi, solar_zenith, solar_azimuth,
surface_tilt, times, kt_prime,
pressure=pressure, temp_dew=temp_dew, albedo=albedo)
else:
ghi_gte_90, dni_gte_90, dhi_gte_90 = np.nan, np.nan, np.nan
# put the AOI < 90 and AOI >= 90 conditions together
output = OrderedDict()
output['ghi'] = ghi.where(aoi_lt_90, ghi_gte_90)
output['dni'] = dni.where(aoi_lt_90, dni_gte_90)
output['dhi'] = dhi.where(aoi_lt_90, dhi_gte_90)
output = pd.DataFrame(output, index=times)
return output
|
22,323 | def clean_multiline_string(multiline_string, sep='\n'):
"""
Dedent, split, remove first and last empty lines, rejoin.
"""
multiline_string = textwrap.dedent(multiline_string)
string_list = multiline_string.split(sep)
if not string_list[0]:
string_list = string_list[1:]
if not string_list[-1]:
string_list = string_list[:-1]
# return '\n'.join( docstrings )
return ''.join((s + '\n') for s in string_list)
| def clean_multiline_string(multiline_string, sep='\n'):
"""
Dedent, split, remove first and last empty lines, rejoin.
"""
multiline_string = textwrap.dedent(multiline_string)
string_list = multiline_string.split(sep)
if not string_list[0]:
string_list = string_list[1:]
if not string_list[-1]:
string_list = string_list[:-1]
return '\n'.join(string_list) + '\n'
|
46,697 | def create_new_key_pair(secret: bytes = None) -> KeyPair:
"""
Returns a new Secp256k1 keypair derived from the provided ``secret``,
a sequence of bytes corresponding to some integer between 0 and the group order.
A valid secret is created if ``None`` is passed.
"""
private_key = Secp256k1PrivateKey.new()
public_key = private_key.get_public_key()
return KeyPair(private_key, public_key)
| def create_new_key_pair(secret: bytes = None) -> KeyPair:
"""
Returns a new Secp256k1 keypair derived from the provided ``secret``,
a sequence of bytes corresponding to some integer between 0 and the group order.
A valid secret is created if ``None`` is passed.
"""
private_key = Secp256k1PrivateKey.new(secret)
public_key = private_key.get_public_key()
return KeyPair(private_key, public_key)
|
54,863 | def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If padding=='false', raise exception when k>=n.
Else return (n-1) laplacian positional encodings and (k-n+1) zero encodings
(padding) when k>=n.
n is the number of nodes in the given graph.
return_eigval : bool
If return_eigval=='True', return laplacian eigenvalues together with eigenvectors.
Else return laplacian eigenvectors only.
Returns
-------
Tensor
The laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Two tensors
The eigenvalues of shape :math:`N` and
the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
if not padding and n <= k:
assert "the number of eigenvectors k must be smaller than the number of nodes n, " + \
f"{k} and {n} detected."
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
| def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If padding=='false', raise exception when k>=n.
Else return (n-1) laplacian positional encodings and (k-n+1) zero encodings
(padding) when k>=n.
n is the number of nodes in the given graph.
return_eigval : bool
If True, return laplacian eigenvalues together with eigenvectors.
Otherwise, return laplacian eigenvectors only. Default: False.
Returns
-------
Tensor
The laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Two tensors
The eigenvalues of shape :math:`N` and
the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
if not padding and n <= k:
assert "the number of eigenvectors k must be smaller than the number of nodes n, " + \
f"{k} and {n} detected."
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
|
52,356 | def maybe_base64(value: Optional[str]) -> Optional[str]:
if not value:
return None
elif set(value).issubset(_base64_alphabet):
return value
return base64.b64encode(value.encode()).decode()
| def decode_base64(value: Optional[str]) -> Optional[str]:
"""
Lazy decoding of base64-encoded data.
If the character set indicates a base64-encoded content, decoding is attempted and the result returned.
Otherwise, or if the decoding fails, the original string is returned.
"""
if not value:
return None
elif set(value).issubset(_base64_alphabet):
try:
return base64.b64encode(value.encode()).decode()
except binascii.Error:
return Value
else:
return value
|
48,921 | def _array_repeat_op(translator, expr):
op = expr.op()
arr, times = op.args
arr_ = _parenthesize(translator, arr)
times_ = _parenthesize(translator, times)
repeat_fmt = '''
(select arrayFlatten(groupArray(arr)) from
(
select {} as arr
from system.numbers
limit {}
))
'''
return repeat_fmt.format(arr_, times_)
| def _array_repeat_op(translator, expr):
op = expr.op()
arr, times = op.args
arr_ = _parenthesize(translator, arr)
times_ = _parenthesize(translator, times)
repeat_fmt = '''
(select arrayFlatten(groupArray(arr)) from
(
select {arr_} as arr
from system.numbers
limit {times_}
))
'''
return repeat_fmt
|
6,881 | def execute():
if frappe.db.exists("DocType", "Desk Page"):
if frappe.db.exists('DocType Workspace'):
# this patch was not added initially, so this page might still exist
frappe.delete_doc('DocType', 'Desk Page')
else:
rename_doc('DocType', 'Desk Page', 'Workspace')
rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)
frappe.reload_doc('desk', 'doctype', 'workspace')
| def execute():
if frappe.db.exists("DocType", "Desk Page"):
if frappe.db.exists('DocType', 'Workspace'):
# this patch was not added initially, so this page might still exist
frappe.delete_doc('DocType', 'Desk Page')
else:
rename_doc('DocType', 'Desk Page', 'Workspace')
rename_doc('DocType', 'Desk Chart', 'Workspace Chart', ignore_if_exists=True)
rename_doc('DocType', 'Desk Link', 'Workspace Link', ignore_if_exists=True)
rename_doc('DocType', 'Desk Shortcut', 'Workspace Shortcut', ignore_if_exists=True)
frappe.reload_doc('desk', 'doctype', 'workspace')
|
4,242 | def combine_evoked(all_evoked, weights):
"""Merge evoked data by weighted addition or subtraction.
Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the
same time instants. Subtraction can be performed by passing
``weights=[1, -1]``.
.. Warning::
Other than cases like simple subtraction mentioned above (where all
weights are -1 or 1), if you provide numeric weights instead of using
``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's
``.nave`` attribute (which is used to scale noise covariance when
applying the inverse operator) may not be suitable for inverse imaging.
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | 'equal' | 'nave'
The weights to apply to the data of each evoked instance, or a string
describing the weighting strategy to apply: ``'nave'`` computes
sum-to-one weights proportional to each object's ``nave`` attribute;
``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``.
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
naves = np.array([evk.nave for evk in all_evoked], float)
if isinstance(weights, str):
_check_option('weights', weights, ['nave', 'equal'])
if weights == 'nave':
weights = naves / naves.sum()
else:
weights = np.ones_like(naves) / len(naves)
else:
weights = np.squeeze(weights).astype(float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
# cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on
# "weighted sample variance". The variance of a weighted sample mean is:
#
# ΟΒ² = wβΒ² ΟβΒ² + wβΒ² ΟβΒ² + ... + wβΒ² ΟβΒ²
#
# We estimate the variance of each evoked instance as 1 / nave to get:
#
# ΟΒ² = wβΒ² / naveβ + wβΒ² / naveβ + ... + wβΒ² / naveβ
#
# And our resulting nave is the reciprocal of this:
new_nave = 1. / np.sum(weights ** 2 / naves)
# This general formula is equivalent to formulae in Matti's manual
# (pp 128-129), where:
# new_nave = sum(naves) when weights='nave' and
# new_nave = 1. / sum(1. / naves) when weights are all 1.
all_evoked = _check_evokeds_ch_names_times(all_evoked)
evoked = all_evoked[0].copy()
# use union of bad channels
bads = list(set(b for e in all_evoked for b in e.info['bads']))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
evoked.nave = new_nave
evoked.comment = ' + '.join(f'{w:0.3f} Γ {e.comment or "unknown"}'
for w, e in zip(weights, all_evoked))
return evoked
| def combine_evoked(all_evoked, weights):
"""Merge evoked data by weighted addition or subtraction.
Each `~mne.Evoked` in ``all_evoked`` should have the same channels and the
same time instants. Subtraction can be performed by passing
``weights=[1, -1]``.
.. Warning::
Other than cases like simple subtraction mentioned above (where all
weights are -1 or 1), if you provide numeric weights instead of using
``'equal'`` or ``'nave'``, the resulting `~mne.Evoked` object's
``.nave`` attribute (which is used to scale noise covariance when
applying the inverse operator) may not be suitable for inverse imaging.
Parameters
----------
all_evoked : list of Evoked
The evoked datasets.
weights : list of float | 'equal' | 'nave'
The weights to apply to the data of each evoked instance, or a string
describing the weighting strategy to apply: ``'nave'`` computes
sum-to-one weights proportional to each object's ``nave`` attribute;
``'equal'`` weights each `~mne.Evoked` by ``1 / len(all_evoked)``.
Returns
-------
evoked : Evoked
The new evoked data.
Notes
-----
.. versionadded:: 0.9.0
"""
naves = np.array([evk.nave for evk in all_evoked], float)
if isinstance(weights, str):
_check_option('weights', weights, ['nave', 'equal'])
if weights == 'nave':
weights = naves / naves.sum()
else:
weights = np.ones_like(naves) / len(naves)
else:
weights = np.array(weights).astype(float)
if weights.ndim != 1 or weights.size != len(all_evoked):
raise ValueError('weights must be the same size as all_evoked')
# cf. https://en.wikipedia.org/wiki/Weighted_arithmetic_mean, section on
# "weighted sample variance". The variance of a weighted sample mean is:
#
# ΟΒ² = wβΒ² ΟβΒ² + wβΒ² ΟβΒ² + ... + wβΒ² ΟβΒ²
#
# We estimate the variance of each evoked instance as 1 / nave to get:
#
# ΟΒ² = wβΒ² / naveβ + wβΒ² / naveβ + ... + wβΒ² / naveβ
#
# And our resulting nave is the reciprocal of this:
new_nave = 1. / np.sum(weights ** 2 / naves)
# This general formula is equivalent to formulae in Matti's manual
# (pp 128-129), where:
# new_nave = sum(naves) when weights='nave' and
# new_nave = 1. / sum(1. / naves) when weights are all 1.
all_evoked = _check_evokeds_ch_names_times(all_evoked)
evoked = all_evoked[0].copy()
# use union of bad channels
bads = list(set(b for e in all_evoked for b in e.info['bads']))
evoked.info['bads'] = bads
evoked.data = sum(w * e.data for w, e in zip(weights, all_evoked))
evoked.nave = new_nave
evoked.comment = ' + '.join(f'{w:0.3f} Γ {e.comment or "unknown"}'
for w, e in zip(weights, all_evoked))
return evoked
|
24,308 | def render_logs_progress():
valid_checks = sorted(get_valid_checks())
total_checks = len(valid_checks)
checks_with_logs = 0
lines = ['## Logs specs', '', None, '', '??? check "Completed"']
for check in valid_checks:
config_file = get_config_file(check)
status = ' '
tile_only = not os.path.exists(config_file)
if not tile_only:
with open(config_file) as f:
if '# logs:' in f.read():
status = 'X'
checks_with_logs += 1
else:
readme_file = get_readme_file(check)
if os.path.exists(readme_file):
with open(readme_file) as f:
if '# Log collection' in f.read():
status = 'X'
checks_with_logs += 1
if status != 'X':
total_checks -= 1 # we cannot really add log collection to tile only integrations
lines.append(f' - [{status}] {check}')
percent = checks_with_logs / total_checks * 100
formatted_percent = f'{percent:.2f}'
lines[2] = f'[={formatted_percent}% "{formatted_percent}%"]'
return lines
| def render_logs_progress():
valid_checks = sorted(get_valid_checks())
total_checks = len(valid_checks)
checks_with_logs = 0
lines = ['## Logs specs', '', None, '', '??? check "Completed"']
for check in valid_checks:
config_file = get_config_file(check)
status = ' '
tile_only = not os.path.exists(config_file)
if not tile_only:
with open(config_file) as f:
if '# logs:' in f.read():
status = 'X'
checks_with_logs += 1
else:
readme_file = get_readme_file(check)
if os.path.exists(readme_file):
with open(readme_file, 'r', encoding='utf-8') as f:
if '# Log collection' in f.read():
status = 'X'
checks_with_logs += 1
if status != 'X':
total_checks -= 1 # we cannot really add log collection to tile only integrations
lines.append(f' - [{status}] {check}')
percent = checks_with_logs / total_checks * 100
formatted_percent = f'{percent:.2f}'
lines[2] = f'[={formatted_percent}% "{formatted_percent}%"]'
return lines
|
29,153 | def require_valid_meta_tag_content(meta_tag_content):
"""Generic meta tag content validation.
Args:
meta_tag_content: str. The meta tag content to validate.
Raises:
Exception. Meta tag content is not a string.
Exception. Meta tag content is long.
"""
if not isinstance(meta_tag_content, python_utils.BASESTRING):
raise ValidationError(
'Expected meta tag content to be a string, received %s'
% meta_tag_content)
if len(meta_tag_content) > constants.MAX_CHARS_IN_META_TAG_CONTENT:
raise ValidationError(
'Meta tag content should not be longer than %s characters.'
% constants.MAX_CHARS_IN_META_TAG_CONTENT)
| def require_valid_meta_tag_content(meta_tag_content):
"""Generic meta tag content validation.
Args:
meta_tag_content: str. The meta tag content to validate.
Raises:
Exception. Meta tag content is not a string.
Exception. Meta tag content is longer than expected.
"""
if not isinstance(meta_tag_content, python_utils.BASESTRING):
raise ValidationError(
'Expected meta tag content to be a string, received %s'
% meta_tag_content)
if len(meta_tag_content) > constants.MAX_CHARS_IN_META_TAG_CONTENT:
raise ValidationError(
'Meta tag content should not be longer than %s characters.'
% constants.MAX_CHARS_IN_META_TAG_CONTENT)
|
55,402 | def _check_spark_version_in_range(ver, min_ver, max_ver):
if Version(ver) > Version(min_ver):
ver = _reset_minor_version(ver)
return _check_version_in_range(ver, min_ver, max_ver)
| def _check_spark_version_in_range(ver, min_ver, max_ver):
parsed_ver = Version(ver)
if parsed_ver > Version(min_ver):
ver = f"{parsed_ver.major}.{parsed_ver.minor}"
return _check_version_in_range(ver, min_ver, max_ver)
|
14,338 | def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: int = 0,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: If version == 0, a map of base glyph name to, either list of
(layer glyph name, color palette index) tuples.
If version >0, a list of BaseGlyphV1Record or tuples/dicts from which
BaseGlyphV1Record can be assembled.
version: the version of COLR table.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
Return:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
colr = ot.COLR()
if version == 0:
populateCOLRv0(colr, colorGlyphs, glyphMap)
else:
colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphs, glyphMap)
self.version = colr.Version = version
if version == 0:
self._fromOTTable(colr)
else:
colr.VarStore = varStore
self.table = colr
return self
| def buildCOLR(
colorGlyphs: _ColorGlyphsDict,
version: int = 0,
glyphMap: Optional[Mapping[str, int]] = None,
varStore: Optional[ot.VarStore] = None,
) -> C_O_L_R_.table_C_O_L_R_:
"""Build COLR table from color layers mapping.
Args:
colorGlyphs: If version == 0, a map of base glyph name to list of
(layer glyph name, color palette index) tuples.
If version >0, a list of BaseGlyphV1Record or tuples/dicts from which
BaseGlyphV1Record can be assembled.
version: the version of COLR table.
glyphMap: a map from glyph names to glyph indices, as returned from
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
Return:
A new COLR table.
"""
self = C_O_L_R_.table_C_O_L_R_()
if varStore is not None and version == 0:
raise ValueError("Can't add VarStore to COLRv0")
colr = ot.COLR()
if version == 0:
populateCOLRv0(colr, colorGlyphs, glyphMap)
else:
colr.BaseGlyphRecordCount = colr.LayerRecordCount = 0
colr.BaseGlyphRecordArray = colr.LayerRecordArray = None
colr.LayerV1List, colr.BaseGlyphV1List = buildColrV1(colorGlyphs, glyphMap)
self.version = colr.Version = version
if version == 0:
self._fromOTTable(colr)
else:
colr.VarStore = varStore
self.table = colr
return self
|
54,446 | def _get_edf_plot(studies: List[Study]) -> "go.Figure":
if len(studies) == 0:
raise ValueError("No studies were given.")
layout = go.Layout(
title="EDF Plot",
xaxis={"title": "Objective Value"},
yaxis={"title": "Cumulative Probability"},
)
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return go.Figure(data=[], layout=layout)
min_x_value = min(trial.value for trial in all_trials)
max_x_value = max(trial.value for trial in all_trials)
x_values = np.linspace(min_x_value, max_x_value, 100)
traces = []
for study in studies:
trials = [
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
]
y_values = []
for x in x_values:
y = len([trial for trial in trials if trial.value <= x]) / len(trials)
y_values.append(y)
traces.append(go.Scatter(x=x_values, y=y_values, name=study.study_name, mode="lines"))
figure = go.Figure(data=traces, layout=layout)
figure.update_yaxes(range=[0, 1])
return figure
| def _get_edf_plot(studies: List[Study]) -> "go.Figure":
if len(studies) == 0:
raise ValueError("No studies were given.")
layout = go.Layout(
title="EDF Plot",
xaxis={"title": "Objective Value"},
yaxis={"title": "Cumulative Probability"},
)
all_trials = list(
itertools.chain.from_iterable(
(
trial
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
)
for study in studies
)
)
if len(all_trials) == 0:
_logger.warning("There are no complete trials.")
return go.Figure(data=[], layout=layout)
min_x_value = min(trial.value for trial in all_trials)
max_x_value = max(trial.value for trial in all_trials)
x_values = np.linspace(min_x_value, max_x_value, 100)
traces = []
for study in studies:
values = np.asarray(
[
trial.value
for trial in study.get_trials(deepcopy=False)
if trial.state == TrialState.COMPLETE
]
)
y_values = np.sum(values[:, np.newaxis] <= x_values, axis=0) / values.size
traces.append(go.Scatter(x=x_values, y=y_values, name=study.study_name, mode="lines"))
figure = go.Figure(data=traces, layout=layout)
figure.update_yaxes(range=[0, 1])
return figure
|
31,320 | def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
| def get_domain_details(client: Client, **args) -> CommandResults:
domain = args.get("domain")
uri = f"/domain/{domain}"
response = client._http_request("GET", uri)
md = ""
current_dns = response["current_dns"]
del response["current_dns"]
md = tableToMarkdown(f"Details for {domain}", response)
for record_type, record_values in current_dns.items():
# If a record type has multiple values, this will output the last item in MD
temp_values: Dict[Any, Any] = {}
for x in record_values["values"]:
temp_values.update(**x)
record_values.update(temp_values)
del record_values["values"]
md += tableToMarkdown(f"DNS {record_type} Records for {domain}", record_values)
results = CommandResults(
outputs_prefix="SecurityTrails",
outputs_key_field=f"SecurityTrails.domain_details.{domain}",
outputs={domain: {"domain_details": response}},
readable_output=md,
)
return results
|
23,398 | def process_search_results(results):
"""
Transform result representation from the output of the widget to the
test framework comparison representation.
"""
matches = {}
for result in results.values():
file, line, col, _ = result
filename = osp.basename(file)
if filename not in matches:
matches[filename] = []
matches[filename].append((line, col))
matches[filename] = sorted(matches[filename])
return matches
| def process_search_results(results):
"""
Transform result representation from the output of the widget to the
test framework comparison representation.
"""
matches = {}
for result in results.values():
file, line, col, __ = result
filename = osp.basename(file)
if filename not in matches:
matches[filename] = []
matches[filename].append((line, col))
matches[filename] = sorted(matches[filename])
return matches
|
56,968 | def get_plain_text(secret: Union[SecretStr, SecretBytes, None, str]) -> Optional[str]:
if secret:
return secret.get_secret_value()
else:
return secret
| def get_plain_text(secret: Union[SecretStr, SecretBytes, None, str]) -> Optional[str]:
try:
return secret.get_secret_value()
except AttributeError:
return secret
|
2,533 | def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = "active",
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = "default-target",
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = "auto",
n_retries: int = 3,
delay: float = 1.0,
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
'default-target', the standard target column a stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache downloaded datasets.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
DataFrames or Series as describe above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, "openml")
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name)
)
data_info = _get_data_info_by_name(
name, version, data_home, n_retries=n_retries, delay=delay
)
data_id = data_info["did"]
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version)
)
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or data_id."
)
data_description = _get_data_description_by_id(data_id, data_home)
if data_description["status"] != "active":
warn(
"Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description["version"],
data_description["name"],
data_description["url"],
)
)
if "error" in data_description:
warn(
"OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description["error"])
)
if "warning" in data_description:
warn(
"OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description["warning"])
)
return_sparse = False
if data_description["format"].lower() == "sparse_arff":
return_sparse = True
if as_frame == "auto":
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError("Cannot return dataframe with sparse data")
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if "true" in (feature["is_ignore"], feature["is_row_identifier"]):
continue
if feature["data_type"] == "string":
raise ValueError(
"STRING attributes are not supported for "
"array representation. Try as_frame=True"
)
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [
feature["name"]
for feature in features_list
if feature["is_target"] == "true"
]
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError(
"Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column))
)
data_columns = _valid_data_column_names(features_list, target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description["file_id"])
bunch = _download_data_to_bunch(
url,
return_sparse,
data_home,
as_frame=bool(as_frame),
features_list=features_list,
shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description["md5_checksum"],
n_retries=n_retries,
delay=delay,
)
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop("description")
)
bunch.update(
DESCR=description,
details=data_description,
url="https://www.openml.org/d/{}".format(data_id),
)
return bunch
| def fetch_openml(
name: Optional[str] = None,
*,
version: Union[str, int] = "active",
data_id: Optional[int] = None,
data_home: Optional[str] = None,
target_column: Optional[Union[str, List]] = "default-target",
cache: bool = True,
return_X_y: bool = False,
as_frame: Union[str, bool] = "auto",
n_retries: int = 3,
delay: float = 1.0,
):
"""Fetch dataset from openml by name or dataset id.
Datasets are uniquely identified by either an integer ID or by a
combination of name and version (i.e. there might be multiple
versions of the 'iris' dataset). Please give either name or data_id
(not both). In case a name is given, a version can also be
provided.
Read more in the :ref:`User Guide <openml>`.
.. versionadded:: 0.20
.. note:: EXPERIMENTAL
The API is experimental (particularly the return value structure),
and might have small backward-incompatible changes without notice
or warning in future releases.
Parameters
----------
name : str, default=None
String identifier of the dataset. Note that OpenML can have multiple
datasets with the same name.
version : int or 'active', default='active'
Version of the dataset. Can only be provided if also ``name`` is given.
If 'active' the oldest version that's still active is used. Since
there may be more than one active version of a dataset, and those
versions may fundamentally be different from one another, setting an
exact version is highly recommended.
data_id : int, default=None
OpenML ID of the dataset. The most specific way of retrieving a
dataset. If data_id is not given, name (and potential version) are
used to obtain a dataset.
data_home : str, default=None
Specify another download and cache folder for the data sets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
target_column : str, list or None, default='default-target'
Specify the column name in the data to use as target. If
'default-target', the standard target column a stored on the server
is used. If ``None``, all columns are returned as data and the
target is ``None``. If list (of strings), all columns with these names
are returned as multi-target (Note: not all scikit-learn classifiers
can handle all types of multi-output combinations)
cache : bool, default=True
Whether to cache the downloaded datasets into `data_home`.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` objects.
as_frame : bool or 'auto', default='auto'
If True, the data is a pandas DataFrame including columns with
appropriate dtypes (numeric, string or categorical). The target is
a pandas DataFrame or Series depending on the number of target_columns.
The Bunch will contain a ``frame`` attribute with the target and the
data. If ``return_X_y`` is True, then ``(data, target)`` will be pandas
DataFrames or Series as describe above.
If as_frame is 'auto', the data and target will be converted to
DataFrame or Series as if as_frame is set to True, unless the dataset
is stored in sparse format.
.. versionchanged:: 0.24
The default value of `as_frame` changed from `False` to `'auto'`
in 0.24.
n_retries : int, default=3
Number of retries when HTTP errors are encountered. Error with status
code 412 won't be retried as they represent OpenML generic errors.
delay : float, default=1.0
Number of seconds between retries.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : np.array, scipy.sparse.csr_matrix of floats, or pandas DataFrame
The feature matrix. Categorical features are encoded as ordinals.
target : np.array, pandas Series or DataFrame
The regression target or classification labels, if applicable.
Dtype is float if numeric, and object if categorical. If
``as_frame`` is True, ``target`` is a pandas object.
DESCR : str
The full description of the dataset
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
.. versionadded:: 0.22
categories : dict or None
Maps each categorical feature name to a list of values, such
that the value encoded as i is ith in the list. If ``as_frame``
is True, this is None.
details : dict
More metadata from OpenML
frame : pandas DataFrame
Only present when `as_frame=True`. DataFrame with ``data`` and
``target``.
(data, target) : tuple if ``return_X_y`` is True
.. note:: EXPERIMENTAL
This interface is **experimental** and subsequent releases may
change attributes without notice (although there should only be
minor changes to ``data`` and ``target``).
Missing values in the 'data' are represented as NaN's. Missing values
in 'target' are represented as NaN's (numerical target) or None
(categorical target)
"""
if cache is False:
# no caching will be applied
data_home = None
else:
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, "openml")
# check valid function arguments. data_id XOR (name, version) should be
# provided
if name is not None:
# OpenML is case-insensitive, but the caching mechanism is not
# convert all data names (str) to lower case
name = name.lower()
if data_id is not None:
raise ValueError(
"Dataset data_id={} and name={} passed, but you can only "
"specify a numeric data_id or a name, not "
"both.".format(data_id, name)
)
data_info = _get_data_info_by_name(
name, version, data_home, n_retries=n_retries, delay=delay
)
data_id = data_info["did"]
elif data_id is not None:
# from the previous if statement, it is given that name is None
if version != "active":
raise ValueError(
"Dataset data_id={} and version={} passed, but you can only "
"specify a numeric data_id or a version, not "
"both.".format(data_id, version)
)
else:
raise ValueError(
"Neither name nor data_id are provided. Please provide name or data_id."
)
data_description = _get_data_description_by_id(data_id, data_home)
if data_description["status"] != "active":
warn(
"Version {} of dataset {} is inactive, meaning that issues have "
"been found in the dataset. Try using a newer version from "
"this URL: {}".format(
data_description["version"],
data_description["name"],
data_description["url"],
)
)
if "error" in data_description:
warn(
"OpenML registered a problem with the dataset. It might be "
"unusable. Error: {}".format(data_description["error"])
)
if "warning" in data_description:
warn(
"OpenML raised a warning on the dataset. It might be "
"unusable. Warning: {}".format(data_description["warning"])
)
return_sparse = False
if data_description["format"].lower() == "sparse_arff":
return_sparse = True
if as_frame == "auto":
as_frame = not return_sparse
if as_frame and return_sparse:
raise ValueError("Cannot return dataframe with sparse data")
# download data features, meta-info about column types
features_list = _get_data_features(data_id, data_home)
if not as_frame:
for feature in features_list:
if "true" in (feature["is_ignore"], feature["is_row_identifier"]):
continue
if feature["data_type"] == "string":
raise ValueError(
"STRING attributes are not supported for "
"array representation. Try as_frame=True"
)
if target_column == "default-target":
# determines the default target based on the data feature results
# (which is currently more reliable than the data description;
# see issue: https://github.com/openml/OpenML/issues/768)
target_columns = [
feature["name"]
for feature in features_list
if feature["is_target"] == "true"
]
elif isinstance(target_column, str):
# for code-simplicity, make target_column by default a list
target_columns = [target_column]
elif target_column is None:
target_columns = []
elif isinstance(target_column, list):
target_columns = target_column
else:
raise TypeError(
"Did not recognize type of target_column"
"Should be str, list or None. Got: "
"{}".format(type(target_column))
)
data_columns = _valid_data_column_names(features_list, target_columns)
shape: Optional[Tuple[int, int]]
# determine arff encoding to return
if not return_sparse:
# The shape must include the ignored features to keep the right indexes
# during the arff data conversion.
data_qualities = _get_data_qualities(data_id, data_home)
shape = _get_num_samples(data_qualities), len(features_list)
else:
shape = None
# obtain the data
url = _DATA_FILE.format(data_description["file_id"])
bunch = _download_data_to_bunch(
url,
return_sparse,
data_home,
as_frame=bool(as_frame),
features_list=features_list,
shape=shape,
target_columns=target_columns,
data_columns=data_columns,
md5_checksum=data_description["md5_checksum"],
n_retries=n_retries,
delay=delay,
)
if return_X_y:
return bunch.data, bunch.target
description = "{}\n\nDownloaded from openml.org.".format(
data_description.pop("description")
)
bunch.update(
DESCR=description,
details=data_description,
url="https://www.openml.org/d/{}".format(data_id),
)
return bunch
|
1,106 | def test_rebase_path_traits():
"""Check rebase_path_traits."""
spec = _test_spec()
a = rebase_path_traits(
spec.trait('a'), '/some/path/f1.txt', '/some/path')
assert '%s' % a == 'f1.txt'
b = rebase_path_traits(
spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path')
assert b == (Path('f1.txt'), Path('f2.txt'))
c = rebase_path_traits(
spec.trait('c'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'],
'/some/path')
assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')]
d = rebase_path_traits(
spec.trait('d'), 2.0, '/some/path')
assert d == 2.0
d = rebase_path_traits(
spec.trait('d'), '/some/path/either.txt', '/some/path')
assert '%s' % d == 'either.txt'
e = rebase_path_traits(
spec.trait('e'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'],
'/some/path')
assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')]
e = rebase_path_traits(
spec.trait('e'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]],
'/some/path')
assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]]
f = rebase_path_traits(
spec.trait('f'), {'1': '/some/path/f1.txt'}, '/some/path')
assert f == {'1': Path('f1.txt')}
g = rebase_path_traits(
spec.trait('g'), 'some/path/either.txt', '/some/path')
assert '%s' % g == 'some/path/either.txt'
g = rebase_path_traits(
spec.trait('g'), '/some/path/either.txt', '/some')
assert '%s' % g == 'path/either.txt'
g = rebase_path_traits(spec.trait('g'), 'string', '/some')
assert '%s' % g == 'string'
g = rebase_path_traits(spec.trait('g'), '2', '/some/path')
assert g == '2' # You dont want this one to be a Path
h = rebase_path_traits(spec.trait('h'), '2', '/some/path')
assert h == '2'
ee = rebase_path_traits(
spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]],
'/some/path')
assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]]
| def test_rebase_path_traits():
"""Check rebase_path_traits."""
spec = _test_spec()
a = rebase_path_traits(
spec.trait('a'), '/some/path/f1.txt', '/some/path')
assert a == Path('f1.txt')
b = rebase_path_traits(
spec.trait('b'), ('/some/path/f1.txt', '/some/path/f2.txt'), '/some/path')
assert b == (Path('f1.txt'), Path('f2.txt'))
c = rebase_path_traits(
spec.trait('c'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'],
'/some/path')
assert c == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')]
d = rebase_path_traits(
spec.trait('d'), 2.0, '/some/path')
assert d == 2.0
d = rebase_path_traits(
spec.trait('d'), '/some/path/either.txt', '/some/path')
assert '%s' % d == 'either.txt'
e = rebase_path_traits(
spec.trait('e'), ['/some/path/f1.txt', '/some/path/f2.txt', '/some/path/f3.txt'],
'/some/path')
assert e == [Path('f1.txt'), Path('f2.txt'), Path('f3.txt')]
e = rebase_path_traits(
spec.trait('e'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]],
'/some/path')
assert e == [[Path('f1.txt'), Path('f2.txt')], [[Path('f3.txt')]]]
f = rebase_path_traits(
spec.trait('f'), {'1': '/some/path/f1.txt'}, '/some/path')
assert f == {'1': Path('f1.txt')}
g = rebase_path_traits(
spec.trait('g'), 'some/path/either.txt', '/some/path')
assert '%s' % g == 'some/path/either.txt'
g = rebase_path_traits(
spec.trait('g'), '/some/path/either.txt', '/some')
assert '%s' % g == 'path/either.txt'
g = rebase_path_traits(spec.trait('g'), 'string', '/some')
assert '%s' % g == 'string'
g = rebase_path_traits(spec.trait('g'), '2', '/some/path')
assert g == '2' # You dont want this one to be a Path
h = rebase_path_traits(spec.trait('h'), '2', '/some/path')
assert h == '2'
ee = rebase_path_traits(
spec.trait('ee'), [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]],
'/some/path')
assert ee == [['/some/path/f1.txt', '/some/path/f2.txt'], [['/some/path/f3.txt']]]
|
30,534 | def get_request_args(params):
limit = request.args.get('n', None)
offset = request.args.get('s', None)
out_format = request.args.get('v', None)
query = request.args.get('q', None)
if limit is None:
limit = try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG)
else:
limit = try_parse_integer(limit, CTX_LIMIT_ERR_MSG)
if offset is None:
offset = try_parse_integer(demisto.params().get('offset'), CTX_OFFSET_ERR_MSG)
else:
offset = try_parse_integer(offset, CTX_OFFSET_ERR_MSG)
if out_format is None:
out_format = params.get('format')
else:
if out_format not in ['text', 'json', 'json-seq', 'csv']:
raise DemistoException(CTX_FORMAT_ERR_MSG)
if query is None:
query = params.get('indicators_query')
return limit, offset, out_format, query
| def get_request_args(params):
limit = request.args.get('n', None)
offset = request.args.get('s', None)
out_format = request.args.get('v', params.get('format'))
query = request.args.get('q', None)
if limit is None:
limit = try_parse_integer(params.get('list_size'), CTX_LIMIT_ERR_MSG)
else:
limit = try_parse_integer(limit, CTX_LIMIT_ERR_MSG)
if offset is None:
offset = try_parse_integer(demisto.params().get('offset'), CTX_OFFSET_ERR_MSG)
else:
offset = try_parse_integer(offset, CTX_OFFSET_ERR_MSG)
if out_format is None:
out_format = params.get('format')
else:
if out_format not in ['text', 'json', 'json-seq', 'csv']:
raise DemistoException(CTX_FORMAT_ERR_MSG)
if query is None:
query = params.get('indicators_query')
return limit, offset, out_format, query
|
44,119 | def kahypar_cut(
num_fragments: int,
adjacent_nodes: List[int],
edge_splits: List[int],
imbalance: int = None,
edge_weights: List[Union[int, float]] = None,
node_weights: List[Union[int, float]] = None,
block_weights: List[Union[int, float]] = None,
edges: Iterable[Any] = None,
seed: int = None,
config_path: Union[str, Path] = None,
trial: int = None,
verbose: bool = False,
) -> List[Union[int, Any]]:
"""Calls KaHyPar to partition a graph. Requires KaHyPar to be installed separately with
``pip install kahypar``.
Args:
num_fragments (int): Desired number of fragments.
adjacent_nodes (List[int]): Flattened list of adjacent node indicies per hMETIS format.
edge_splits (List[int]): List of starting indices for edges in the ``adjacent_nodes``.
imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination.
edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges.
node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes.
fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults
to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes.
edges (Iterable[Any]): Mapping for returning actual cut edge objects instead of cut edge
indices. Defaults to None which will return cut edge indices.
seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1,
i.e. unfixed seed.
config_path (str): KaHyPar's .ini config file path. Defaults to its SEA20 paper config.
trial (int): trial id for summary label creation. Defaults to None.
verbose (bool): Flag for printing KaHyPar's output summary. Defaults to False.
Returns:
List[Union[int, Any]]: List of cut edges.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.RZ(0.240, wires=0)
qml.RZ(0.133, wires="a")
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.expval(qml.PauliZ(wires=[0]))
We can let KaHyPar find the cut placement automatically. First convert it to the input format
for KaHyPar's hypergraph representation:
>>> graph = qcut.tape_to_graph(tape)
>>> adj_nodes, edge_splits, edge_weights = qcut.graph_to_hmetis(graph)
Then feed the output to `qcut.kahypar_cut()` to find the cut edges:
>>> cut_edges = qcut.kahypar_cut(
num_fragments=2,
adjacent_nodes=adj_nodes,
edge_splits=edge_splits,
edges=graph.edges,
)
>>> cut_edges
[(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)]
The above cut edge can be seen in the original circuit on wire 0 after the CNOT gate
>>> tape.draw()
0: ββRX(0.432)βββCββRZ(0.24)βββRX(0.432)βββ€ β¨Zβ©
a: ββRY(0.543)βββ°XββRZ(0.133)ββRY(0.543)βββ€
The output cut edges can be subsequently input into `fragment_graph()` to obtain the
fragment subcircuits and the communication graph:
>>> frags, comm_graph = qcut.fragment_graph(graph, cut_edges)
>>> frags
[<networkx.classes.multidigraph.MultiDiGraph at 0x7f3d2ed3b790>,
<networkx.classes.multidigraph.MultiDiGraph at 0x7f3d2ed3b8e0>]
"""
# pylint: disable=too-many-arguments, import-outside-toplevel
import kahypar
trial = 0 if trial is None else trial
ne = len(edge_splits) - 1
nv = max(adjacent_nodes) + 1
if edge_weights is not None or node_weights is not None:
edge_weights = edge_weights or [1] * ne
node_weights = node_weights or [1] * nv
hypergraph = kahypar.Hypergraph(
nv,
ne,
edge_splits,
adjacent_nodes,
num_fragments,
edge_weights,
node_weights,
)
else:
hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments)
context = kahypar.Context()
config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini")
context.loadINIconfiguration(config_path)
context.setK(num_fragments)
if isinstance(imbalance, float):
context.setEpsilon(imbalance)
if isinstance(block_weights, Sequence) and (len(block_weights) == num_fragments):
context.setCustomTargetBlockWeights(block_weights)
if isinstance(seed, int):
context.setSeed(int(seed))
if not verbose:
context.suppressOutput(True)
kahypar.partition(hypergraph, context)
cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()]
edges = edges if isinstance(edges, Iterable) else hypergraph.edges()
# compress() ignores the extra hyperwires at the end if there is any.
cut_edges = list(compress(edges, cut_edge_mask))
return cut_edges
| def kahypar_cut(
num_fragments: int,
adjacent_nodes: List[int],
edge_splits: List[int],
imbalance: int = None,
edge_weights: List[Union[int, float]] = None,
node_weights: List[Union[int, float]] = None,
fragment_weights: List[Union[int, float]] = None,
edges: Iterable[Any] = None,
seed: int = None,
config_path: Union[str, Path] = None,
trial: int = None,
verbose: bool = False,
) -> List[Union[int, Any]]:
"""Calls KaHyPar to partition a graph. Requires KaHyPar to be installed separately with
``pip install kahypar``.
Args:
num_fragments (int): Desired number of fragments.
adjacent_nodes (List[int]): Flattened list of adjacent node indicies per hMETIS format.
edge_splits (List[int]): List of starting indices for edges in the ``adjacent_nodes``.
imbalance (int): Imbalance factor of the partitioning. Defaults to KaHyPar's determination.
edge_weights (List[Union[int, float]]): Weights for edges. Defaults to unit-weighted edges.
node_weights (List[Union[int, float]]): Weights for nodes. Defaults to unit-weighted nodes.
fragment_weights (List[Union[int, float]]): Maximum size constraints by fragment. Defaults
to no such constraints, with ``imbalance`` the only parameter affecting fragment sizes.
edges (Iterable[Any]): Mapping for returning actual cut edge objects instead of cut edge
indices. Defaults to None which will return cut edge indices.
seed (int): KaHyPar's seed. Defaults to the seed in the config file which defaults to -1,
i.e. unfixed seed.
config_path (str): KaHyPar's .ini config file path. Defaults to its SEA20 paper config.
trial (int): trial id for summary label creation. Defaults to None.
verbose (bool): Flag for printing KaHyPar's output summary. Defaults to False.
Returns:
List[Union[int, Any]]: List of cut edges.
**Example**
Consider the following 2-wire circuit with one CNOT gate connecting the wires:
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.CNOT(wires=[0, "a"])
qml.RZ(0.240, wires=0)
qml.RZ(0.133, wires="a")
qml.RX(0.432, wires=0)
qml.RY(0.543, wires="a")
qml.expval(qml.PauliZ(wires=[0]))
We can let KaHyPar find the cut placement automatically. First convert it to the input format
for KaHyPar's hypergraph representation:
>>> graph = qcut.tape_to_graph(tape)
>>> adj_nodes, edge_splits, edge_weights = qcut.graph_to_hmetis(graph)
Then feed the output to `qcut.kahypar_cut()` to find the cut edges:
>>> cut_edges = qcut.kahypar_cut(
num_fragments=2,
adjacent_nodes=adj_nodes,
edge_splits=edge_splits,
edges=graph.edges,
)
>>> cut_edges
[(CNOT(wires=[0, 'a']), RZ(0.24, wires=[0]), 0)]
The above cut edge can be seen in the original circuit on wire 0 after the CNOT gate
>>> tape.draw()
0: ββRX(0.432)βββCββRZ(0.24)βββRX(0.432)βββ€ β¨Zβ©
a: ββRY(0.543)βββ°XββRZ(0.133)ββRY(0.543)βββ€
The output cut edges can be subsequently input into `fragment_graph()` to obtain the
fragment subcircuits and the communication graph:
>>> frags, comm_graph = qcut.fragment_graph(graph, cut_edges)
>>> frags
[<networkx.classes.multidigraph.MultiDiGraph at 0x7f3d2ed3b790>,
<networkx.classes.multidigraph.MultiDiGraph at 0x7f3d2ed3b8e0>]
"""
# pylint: disable=too-many-arguments, import-outside-toplevel
import kahypar
trial = 0 if trial is None else trial
ne = len(edge_splits) - 1
nv = max(adjacent_nodes) + 1
if edge_weights is not None or node_weights is not None:
edge_weights = edge_weights or [1] * ne
node_weights = node_weights or [1] * nv
hypergraph = kahypar.Hypergraph(
nv,
ne,
edge_splits,
adjacent_nodes,
num_fragments,
edge_weights,
node_weights,
)
else:
hypergraph = kahypar.Hypergraph(nv, ne, edge_splits, adjacent_nodes, num_fragments)
context = kahypar.Context()
config_path = config_path or str(Path(__file__).parent / "_cut_kKaHyPar_sea20.ini")
context.loadINIconfiguration(config_path)
context.setK(num_fragments)
if isinstance(imbalance, float):
context.setEpsilon(imbalance)
if isinstance(block_weights, Sequence) and (len(block_weights) == num_fragments):
context.setCustomTargetBlockWeights(block_weights)
if isinstance(seed, int):
context.setSeed(int(seed))
if not verbose:
context.suppressOutput(True)
kahypar.partition(hypergraph, context)
cut_edge_mask = [hypergraph.connectivity(e) > 1 for e in hypergraph.edges()]
edges = edges if isinstance(edges, Iterable) else hypergraph.edges()
# compress() ignores the extra hyperwires at the end if there is any.
cut_edges = list(compress(edges, cut_edge_mask))
return cut_edges
|
31,214 | def mantis_get_issue_by_id_command(client, args):
"""
Returns Hello {somename}
Args:
client (Client): Mantis client.
args (dict): all command arguments.
Returns:
Mantis
"""
_id = args.get('id')
resp = client.get_issue(_id).get('issues')[0]
issues = create_output_result(resp)
readable_output = tableToMarkdown("Mantis Issue Details", issues, headers=TABLE_HEADERS)
results = CommandResults(
readable_output=readable_output,
outputs_prefix="Mantis.issue",
outputs_key_field=TABLE_HEADERS,
outputs=issues
)
return_results(results)
| def mantis_get_issue_by_id_command(client, args):
"""
Returns Hello {somename}
Args:
client (Client): Mantis client.
args (dict): all command arguments.
Returns:
Mantis
"""
_id = args.get('id')
resp = client.get_issue(_id).get('issues')[0]
issues = create_output_result(resp)
readable_output = tableToMarkdown("Mantis Issue Details", issues, headers=TABLE_HEADERS)
results = CommandResults(
readable_output=readable_output,
outputs_prefix="Mantis.issue",
outputs_key_field=TABLE_HEADERS,
outputs=issues
)
return results
|
30,974 | def results_return(command, thingtoreturn):
for item in thingtoreturn:
description = ''
ip_reputation = {
'indicator': item['Address'],
}
try:
if item['Malicious']['Vendor']:
score = Common.DBotScore.BAD
description = ip_reputation['description'] = item['Malicious']['Description']
except LookupError:
score = Common.DBotScore.NONE
dbot_score = Common.DBotScore(
indicator=item['Address'],
indicator_type=DBotScoreType.IP,
integration_name='Barracuda(BRBL)',
malicious_description=description,
score=score
)
ip = Common.IP(
ip=item['Address'],
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='Barracuda(BRBL).' + str(command),
outputs_key_field='indicator',
outputs=ip_reputation,
indicators=[ip]
)
return_results(results)
| def results_return(command, thingtoreturn):
for item in thingtoreturn:
description = ''
ip_reputation = {
'indicator': item['Address'],
}
try:
if item['Malicious']['Vendor']:
score = Common.DBotScore.BAD
description = ip_reputation['description'] = item['Malicious']['Description']
except LookupError:
score = Common.DBotScore.NONE
dbot_score = Common.DBotScore(
indicator=item['Address'],
indicator_type=DBotScoreType.IP,
integration_name='Barracuda(BRBL)',
malicious_description=description,
score=score
)
ip = Common.IP(
ip=item['Address'],
dbot_score=dbot_score
)
results = CommandResults(
outputs_prefix='Barracuda.' + str(command),
outputs_key_field='indicator',
outputs=ip_reputation,
indicators=[ip]
)
return_results(results)
|
33,011 | def parse_arguments():
parser = argparse.ArgumentParser(
prog="spotdl",
description=help_notice,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"query", type=str, nargs="+", help="URL/String for a song/album/playlist/artist"
)
parser.add_argument("--debug-termination", action="store_true")
parser.add_argument("-o", "--output", help="Output directory path", dest="path")
parser.add_argument(
"-of",
"--output-format",
help="Output format",
dest="format",
choices={"mp3", "m4a", "flac", "ogg", "opus"},
default="mp3",
)
parser.add_argument(
"--user-auth",
help="Use User Authentication",
action="store_true",
dest="userAuth",
)
parser.add_argument("-f", "--ffmpeg", help="Path to ffmpeg", dest="ffmpeg")
parser.add_argument(
"--ignore-ffmpeg-version", help="Ignore ffmpeg version", action="store_true"
)
parser.add_argument(
"-v",
"--version",
help="Show program's version number and exit",
action="store_true",
)
return parser.parse_args()
| def parse_arguments():
parser = argparse.ArgumentParser(
prog="spotdl",
description=help_notice,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"query", type=str, nargs="+", help="URL/String for a song/album/playlist/artist"
)
parser.add_argument("--debug-termination", action="store_true")
parser.add_argument("-o", "--output", help="Output directory path", dest="path")
parser.add_argument(
"-of",
"--output-format",
help="Output format",
dest="format",
choices={"mp3", "m4a", "flac", "ogg", "opus"},
default="mp3",
)
parser.add_argument(
"--user-auth",
help="Use User Authentication",
action="store_true",
dest="userAuth",
)
parser.add_argument("-f", "--ffmpeg", help="Path to ffmpeg", dest="ffmpeg")
parser.add_argument(
"--ignore-ffmpeg-version", help="Ignore ffmpeg version", action="store_true"
)
parser.add_argument(
"-v",
"--version",
help="Show spotDL's version and exit",
action="store_true",
)
return parser.parse_args()
|
41,206 | def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A as 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A as 1 by swapping gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the orde: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
| def decompose_clifford_tableau_to_operations(
qubits: List['cirq.Qid'], clifford_tableau: qis.CliffordTableau
) -> List[ops.Operation]:
"""Decompose an n-qubit Clifford Tableau into a list of one/two qubit operations.
Args:
qubits: The list of qubits being operated on.
clifford_tableau: The Clifford Tableau for decomposition.
Returns:
A list of operations reconstructs the same Clifford tableau.
"""
if len(qubits) != clifford_tableau.n:
raise ValueError(
f"The number of qubits must be the same as the number of Clifford Tableau."
)
assert (
clifford_tableau._validate()
), "The provided clifford_tableau must satisfy the symplectic property."
t: qis.CliffordTableau = clifford_tableau.copy()
operations: List[ops.Operation] = []
args = sim.ActOnCliffordTableauArgs(
tableau=t, axes=[], prng=np.random.RandomState(), log_of_measurement_results={}
)
_X_with_ops = functools.partial(_X, args=args, operations=operations, qubits=qubits)
_Z_with_ops = functools.partial(_Z, args=args, operations=operations, qubits=qubits)
_H_with_ops = functools.partial(_H, args=args, operations=operations, qubits=qubits)
_S_with_ops = functools.partial(_Sdg, args=args, operations=operations, qubits=qubits)
_CNOT_with_ops = functools.partial(_CNOT, args=args, operations=operations, qubits=qubits)
_SWAP_with_ops = functools.partial(_SWAP, args=args, operations=operations, qubits=qubits)
# The procedure is based on Theorem 8 in
# [1] S. Aaronson, D. Gottesman, *Improved Simulation of Stabilizer Circuits*,
# Phys. Rev. A 70, 052328 (2004). https://arxiv.org/abs/quant-ph/0406196
# with modification by doing it row-by-row instead.
# Suppose we have a Clifford Tableau:
# Xs Zs
# Destabilizers: [ A | B ]
# Stabilizers: [ C | D ]
for i in range(t.n):
# Step 1a: Make the diagonal element of A equal to 1 by Hadamard gate if necessary.
if not t.xs[i, i] and t.zs[i, i]:
_H_with_ops(i)
# Step 1b: Make the diagonal element of A as 1 by swapping gate if necessary.
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.xs[i, j]:
_SWAP_with_ops(i, j)
break
# Step 1c: We may still not be able to find non-zero element in whole Xs row. Then,
# apply swap + Hadamard from zs. It is guaranteed to find one by lemma 5 in [1].
if not t.xs[i, i]:
for j in range(i + 1, t.n):
if t.zs[i, j]:
_H_with_ops(j)
_SWAP_with_ops(i, j)
break
# Step 2: Eliminate the elements in A By CNOT and phase gate (i-th row)
# first i rows of destabilizers: [ I 0 | 0 0 ]
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i, j]]
if np.any(t.zs[i, i:]):
if not t.zs[i, i]:
_S_with_ops(i)
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i, j]]
_S_with_ops(i)
# Step 3: Eliminate the elements in D By CNOT and phase gate (i-th row)
# first i rows of stabilizers: [ 0 0 | I 0 ]
_ = [_CNOT_with_ops(j, i) for j in range(i + 1, t.n) if t.zs[i + t.n, j]]
if np.any(t.xs[i + t.n, i:]):
# Swap xs and zs
_H_with_ops(i)
_ = [_CNOT_with_ops(i, j) for j in range(i + 1, t.n) if t.xs[i + t.n, j]]
if t.zs[i + t.n, i]:
_S_with_ops(i)
_H_with_ops(i)
# Step 4: Correct the phase of tableau
_ = [_Z_with_ops(i) for i, p in enumerate(t.rs[: t.n]) if p]
_ = [_X_with_ops(i) for i, p in enumerate(t.rs[t.n :]) if p]
# Step 5: invert the operations by reversing the orde: (AB)^{+} = B^{+} A^{+}.
# Note only S gate is not self-adjoint.
return operations[::-1]
|
35,436 | def panda_state_function(q: Queue):
pm = messaging.PubMaster(['pandaState'])
while 1:
if not q.empty():
if q.get() == "quit":
break
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
| def panda_state_function(q: Queue):
pm = messaging.PubMaster(['pandaState'])
while not exit_event.is_set():
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "blackPanda",
'controlsAllowed': True,
'safetyModel': 'hondaNidec'
}
pm.send('pandaState', dat)
time.sleep(0.5)
|
14,632 | def _munge_featureset_name(featureset):
"""
Joins features in ``featureset`` by '+' if ``featureset`` is not a string.
Otherwise, returns ``featureset``.
Parameters
----------
featureset : SKLL.FeatureSet
A SKLL ``FeatureSet`` object.
Returns
-------
res : str
``featureset`` names joined with '+', if ``featureset`` is not a string.
"""
if isinstance(featureset, str):
return featureset
res = '+'.join(sorted(featureset))
return res
| def _munge_featureset_name(featureset):
"""
Joins features in ``featureset`` by '+' if ``featureset`` is not a string.
Otherwise, returns ``featureset``.
Parameters
----------
featureset : skll.data.FeatureSet
A SKLL ``FeatureSet`` object.
Returns
-------
res : str
``featureset`` names joined with '+', if ``featureset`` is not a string.
"""
if isinstance(featureset, str):
return featureset
res = '+'.join(sorted(featureset))
return res
|
22,095 | def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
rec[1]["infos"] = infos
yield rec
| def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for tstamp, rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
rec[1]["infos"] = infos
yield rec
|
41,571 | def get_parameters():
parser = argparse.ArgumentParser(description='This script is curating dataset data_axondeepseg_tem to BIDS')
# Define input path
parser.add_argument("-d", "--data",
help="Path to folder containing the dataset to be curated",
required=True)
# Define output path
parser.add_argument("-o", "--outputdata",
help="Path to output folder",
required=True,
)
arguments = parser.parse_args()
return arguments
| def get_parameters():
parser = argparse.ArgumentParser(description='This script is curating the demo dataset to BIDS')
# Define input path
parser.add_argument("-d", "--data",
help="Path to folder containing the dataset to be curated",
required=True)
# Define output path
parser.add_argument("-o", "--outputdata",
help="Path to output folder",
required=True,
)
arguments = parser.parse_args()
return arguments
|
24,824 | def implements(obj: "Interface", interface: Tuple[type, type]) -> bool:
"""Return whether the give object (maybe an instance or class) implements
the interface.
"""
kimplements = getattr(obj, "__implements__", ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,)
return any(
issubclass(implementedinterface, interface)
for implementedinterface in kimplements
)
| def implements(obj: "Interface", interface: Tuple[type, type]) -> bool:
"""Return whether the give object (maybe an instance or class) implements
the interface.
"""
kimplements = getattr(obj, "__implements__", ())
if not isinstance(kimplements, (list, tuple)):
kimplements = (kimplements,)
return any(issubclass(i, interface) for i in kimplements)
|
43,986 | def qnode_spectrum(qnode, encoding_args=None, argnum=None, decimals=8, validation_kwargs=None):
r"""Compute the frequency spectrum of the Fourier representation of quantum circuits,
including classical preprocessing.
The circuit must only use gates as input-encoding gates that can be decomposed
into single-parameter gates of the form :math:`e^{-i x_j G}` , which allows the
computation of the spectrum by inspecting the gates' generators :math:`G`.
The most important example of such single-parameter gates are Pauli rotations.
The argument ``argnum`` controls which QNode arguments are considered as encoded
inputs and the spectrum is computed only for these arguments.
The input-encoding *gates* are those that are controlled by input-encoding QNode arguments.
If no ``argnum`` is given, all QNode arguments are considered to be input-encoding
arguments.
.. note::
Arguments of the QNode or parameters within an array-valued QNode argument
that do not contribute to the Fourier series of the QNode
with any frequency are considered as contributing with a constant term.
That is, a parameter that does not control any gate has the spectrum ``[0]``.
Args:
qnode (pennylane.QNode): :class:`~.pennylane.QNode` to compute the spectrum for
encoding_args (dict[str, list[tuple]], set): Parameter index dictionary;
keys are argument names, values are index tuples for that argument
or an ``Ellipsis``. If a ``set``, all values are set to ``Ellipsis``.
The contained argument and parameter indices indicate the scalar variables
for which the spectrum is computed
argnum (list[int]): Numerical indices for arguments with respect to which
to compute the spectrum
decimals (int): number of decimals to which to round frequencies.
validation_kwargs (dict): Keyword arguments passed to
:func:`~.pennylane.math.is_independent` when testing for linearity of
classical preprocessing in the QNode.
Returns:
function: Function which accepts the same arguments as the QNode.
When called, this function will return a dictionary of dictionaries
containing the frequency spectra per QNode parameter.
**Details**
A circuit that returns an expectation value of a Hermitian observable which depends on
:math:`N` scalar inputs :math:`x_j` can be interpreted as a function
:math:`f: \mathbb{R}^N \rightarrow \mathbb{R}`.
This function can always be expressed by a Fourier-type sum
.. math::
\sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N}
c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N}
over the *frequency spectra* :math:`\Omega_j \subseteq \mathbb{R},`
:math:`j=1,\dots,N`. Each spectrum has the property that
:math:`0 \in \Omega_j`, and the spectrum is symmetric
(i.e., for every :math:`\omega \in \Omega_j` we have that :math:`-\omega \in\Omega_j`).
If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*.
As shown in `Vidal and Theis (2019) <https://arxiv.org/abs/1901.11434>`_ and
`Schuld, Sweke and Meyer (2020) <https://arxiv.org/abs/2008.08605>`_,
if an input :math:`x_j, j = 1 \dots N`,
only enters into single-parameter gates of the form :math:`e^{-i x_j G}`
(where :math:`G` is a Hermitian generator),
the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues
of the generators :math:`G`. In many situations, the spectra are limited
to a few frequencies only, which in turn limits the function class that the circuit
can express.
The ``qnode_spectrum`` function computes all frequencies that will
potentially appear in the sets :math:`\Omega_1` to :math:`\Omega_N`.
.. note::
The ``qnode_spectrum`` function also supports
preprocessing of the QNode arguments before they are fed into the gates,
as long as this processing is *linear*. In particular, constant
prefactors for the encoding arguments are allowed.
**Example**
Consider the following example, which uses non-trainable inputs ``x``, ``y`` and ``z``
as well as trainable parameters ``w`` as arguments to the QNode.
.. code-block:: python
n_qubits = 3
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(x, y, z, w):
for i in range(n_qubits):
qml.RX(0.5*x[i], wires=i)
qml.Rot(w[0,i,0], w[0,i,1], w[0,i,2], wires=i)
qml.RY(2.3*y[i], wires=i)
qml.Rot(w[1,i,0], w[1,i,1], w[1,i,2], wires=i)
qml.RX(z, wires=i)
return qml.expval(qml.PauliZ(wires=0))
This circuit looks as follows:
>>> x = np.array([1., 2., 3.])
>>> y = np.array([0.1, 0.3, 0.5])
>>> z = -1.8
>>> w = np.random.random((2, n_qubits, 3))
>>> print(qml.draw(circuit)(x, y, z, w))
0: ββRX(0.5)ββRot(0.598, 0.949, 0.346)βββRY(0.23)ββRot(0.693, 0.0738, 0.246)ββRX(-1.8)βββ€ β¨Zβ©
1: ββRX(1)ββββRot(0.0711, 0.701, 0.445)ββRY(0.69)ββRot(0.32, 0.0482, 0.437)βββRX(-1.8)βββ€
2: ββRX(1.5)ββRot(0.401, 0.0795, 0.731)ββRY(1.15)ββRot(0.756, 0.38, 0.38)βββββRX(-1.8)βββ€
Applying the ``qnode_spectrum`` function to the circuit for
the non-trainable parameters, we obtain:
>>> res = qml.fourier.qnode_spectrum(circuit, argnum=[0, 1, 2])(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"x": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]}
"y": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
"z": {(): [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]}
.. note::
While the Fourier spectrum usually does not depend
on trainable circuit parameters or the actual values of the inputs,
it may still change based on inputs to the QNode that alter the architecture
of the circuit.
.. UsageDetails::
Above, we selected all input-encoding parameters for the spectrum computation, using
the ``argnum`` keyword argument. We may also restrict the full analysis to a single
QNode argument, again using ``argnum``:
>>> res = qml.fourier.qnode_spectrum(circuit, argnum=[0])(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"x": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]}
Selecting arguments by name instead of index is possible via the
``encoding_args`` argument:
>>> res = qml.fourier.qnode_spectrum(circuit, encoding_args={"y"})(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"y": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
Note that for array-valued arguments the spectrum for each element of the array
is computed. A more fine-grained control is available by passing index tuples
for the respective argument name in ``encoding_args``:
>>> encoding_args = {"y": [(0,),(2,)]}
>>> res = qml.fourier.qnode_spectrum(circuit, encoding_args=encoding_args)(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"y": {(0,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
.. warning::
The ``qnode_spectrum`` function checks whether the classical preprocessing between
QNode and gate arguments is linear by computing the Jacobian of the processing
and applying :func:`~.pennylane.math.is_independent`. This makes it unlikely
-- *but not impossible* -- that non-linear functions go undetected.
The number of additional points at which the Jacobian is computed in the numerical
test of ``is_independent`` as well as other options for this function
can be controlled via ``validation_kwargs``.
Furthermore, the QNode arguments *not* marked in ``argnum`` will not be
considered in this test and if they resemble encoded inputs, the entire
spectrum might be incorrect or the circuit might not even admit one.
The ``qnode_spectrum`` function works in all interfaces:
.. code-block:: python
import tensorflow as tf
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface='tf')
def circuit(x):
qml.RX(0.4*x[0], wires=0)
qml.PhaseShift(x[1]*np.pi, wires=0)
return qml.expval(qml.PauliZ(wires=0))
x = tf.constant([1., 2.])
res = qml.fourier.qnode_spectrum(circuit)(x)
>>> print(res)
{"x": {(0,): [-0.4, 0.0, 0.4], (1,): [-3.14159, 0.0, 3.14159]}}
Finally, compare ``qnode_spectrum`` with :func:`~.circuit_spectrum`, using
the following circuit.
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(0.5*x**2, wires=0, id="x")
qml.RY(2.3*y, wires=1, id="y0")
qml.CNOT(wires=[1,0])
qml.RY(z, wires=0, id="y1")
return qml.expval(qml.PauliZ(wires=0))
First, note that we assigned ``id`` labels to the gates for which we will use
``circuit_spectrum``. This allows us to choose these gates in the computation:
>>> x, y, z = 0.1, 0.2, 0.3
>>> circuit_spec_fn = qml.fourier.circuit_spectrum(circuit, encoding_gates=["x","y0","y1"])
>>> circuit_spec = circuit_spec_fn(x, y, z)
>>> for _id, spec in circuit_spec.items():
... print(f"{_id}: {spec}")
x: [-1.0, 0, 1.0]
y0: [-1.0, 0, 1.0]
y1: [-1.0, 0, 1.0]
As we can see, the preprocessing in the QNode is not included in the simple spectrum.
In contrast, the output of ``qnode_spectrum`` is:
>>> adv_spec = qml.fourier.qnode_spectrum(circuit, encoding_args={"y", "z"})
>>> for _id, spec in adv_spec.items():
... print(f"{_id}: {spec}")
y: {(): [-2.3, 0.0, 2.3]}
z: {(): [-1.0, 0.0, 1.0]}
Note that the values of the output are dictionaries instead of the spectrum lists, that
they include the prefactors introduced by classical preprocessing, and
that we would not be able to compute the advanced spectrum for ``x`` because it is
preprocessed non-linearily in the gate ``qml.RX(0.5*x**2, wires=0, id="x")``.
"""
# pylint: disable=too-many-branches,protected-access
validation_kwargs = validation_kwargs or {}
encoding_args, argnum = _process_ids(encoding_args, argnum, qnode)
atol = 10 ** (-decimals) if decimals is not None else 1e-10
# A map between Jacobian indices (contiguous) and arg names (may be discontiguous)
arg_name_map = dict(enumerate(encoding_args))
jac_fn = qml.transforms.classical_jacobian(
qnode, argnum=argnum, expand_fn=qml.transforms.tape_expand.expand_multi_par_and_no_gen
)
@wraps(qnode)
def wrapper(*args, **kwargs):
# Compute classical Jacobian and assert preprocessing is linear
if not qml.math.is_independent(jac_fn, qnode.interface, args, kwargs, **validation_kwargs):
raise ValueError(
"The Jacobian of the classical preprocessing in the provided QNode "
"is not constant; only linear classical preprocessing is supported."
)
# After construction, check whether invalid operations (for a spectrum)
# are present in the QNode
for m in qnode.qtape.measurements:
if m.return_type not in {qml.operation.Expectation, qml.operation.Probability}:
raise ValueError(
f"The return_type {m.return_type.value} is not supported as it likely does "
"not admit a Fourier spectrum."
)
class_jacs = jac_fn(*args, **kwargs)
spectra = {}
tape = qml.transforms.tape_expand.expand_multi_par_and_no_gen(qnode.qtape)
par_info = tape._par_info
# Iterate over jacobians per argument
for jac_idx, class_jac in enumerate(class_jacs):
# Obtain argument name for the jacobian index
arg_name = arg_name_map[jac_idx]
# Extract requested parameter indices for the current argument
if encoding_args[arg_name] is Ellipsis:
# If no index for this argument is specified, request all parameters within
# the argument (Recall () is a valid index for scalar-valued arguments here)
requested_par_ids = set(product(*(range(sh) for sh in class_jac.shape[1:])))
else:
requested_par_ids = set(encoding_args[arg_name])
# Each requested parameter at least "contributes" as a constant
_spectra = {par_idx: {0} for par_idx in requested_par_ids}
# Iterate over the axis of the current Jacobian that corresponds to the tape operations
for op_idx, jac_of_op in enumerate(np.round(class_jac, decimals=decimals)):
op = par_info[op_idx]["op"]
# Find parameters that both were requested and feed into the operation
if len(class_jac.shape) == 1:
# Scalar argument, only axis of Jacobian is for operations
if np.isclose(jac_of_op, 0.0, atol=atol, rtol=0):
continue
jac_of_op = {(): jac_of_op}
par_ids = {()}
else:
# Array-valued argument
# Extract indices of parameters contributing to the current operation
par_ids = zip(*[map(int, _ids) for _ids in np.where(jac_of_op)])
# Exclude contributing parameters that were not requested
par_ids = set(par_ids).intersection(requested_par_ids)
if len(par_ids) == 0:
continue
# Multi-parameter gates are not supported (we expanded the tape already)
if len(op.parameters) != 1:
raise ValueError(
"Can only consider one-parameter gates as data-encoding gates; "
f"got {op.name}."
)
spec = get_spectrum(op, decimals=decimals)
# For each contributing parameter, rescale the operation's spectrum
# and add it to the spectrum for that parameter
for par_idx in par_ids:
scale = float(qml.math.abs(jac_of_op[par_idx]))
scaled_spec = [scale * f for f in spec]
_spectra[par_idx] = join_spectra(_spectra[par_idx], scaled_spec)
# Construct the sorted spectrum also containing negative frequencies
for idx, spec in _spectra.items():
spec = sorted(spec)
_spectra[idx] = [-freq for freq in spec[:0:-1]] + spec
spectra[arg_name] = _spectra
return spectra
return wrapper
| def qnode_spectrum(qnode, encoding_args=None, argnum=None, decimals=8, validation_kwargs=None):
r"""Compute the frequency spectrum of the Fourier representation of quantum circuits,
including classical preprocessing.
The circuit must only use gates as input-encoding gates that can be decomposed
into single-parameter gates of the form :math:`e^{-i x_j G}` , which allows the
computation of the spectrum by inspecting the gates' generators :math:`G`.
The most important example of such single-parameter gates are Pauli rotations.
The argument ``argnum`` controls which QNode arguments are considered as encoded
inputs and the spectrum is computed only for these arguments.
The input-encoding *gates* are those that are controlled by input-encoding QNode arguments.
If no ``argnum`` is given, all QNode arguments are considered to be input-encoding
arguments.
.. note::
Arguments of the QNode or parameters within an array-valued QNode argument
that do not contribute to the Fourier series of the QNode
with any frequency are considered as contributing with a constant term.
That is, a parameter that does not control any gate has the spectrum ``[0]``.
Args:
qnode (pennylane.QNode): :class:`~.pennylane.QNode` to compute the spectrum for
encoding_args (dict[str, list[tuple]], set): Parameter index dictionary;
keys are argument names, values are index tuples for that argument
or an ``Ellipsis``. If a ``set``, all values are set to ``Ellipsis``.
The contained argument and parameter indices indicate the scalar variables
for which the spectrum is computed
argnum (list[int]): Numerical indices for arguments with respect to which
to compute the spectrum
decimals (int): number of decimals to which to round frequencies.
validation_kwargs (dict): Keyword arguments passed to
:func:`~.pennylane.math.is_independent` when testing for linearity of
classical preprocessing in the QNode.
Returns:
function: Function which accepts the same arguments as the QNode.
When called, this function will return a dictionary of dictionaries
containing the frequency spectra per QNode parameter.
**Details**
A circuit that returns an expectation value of a Hermitian observable which depends on
:math:`N` scalar inputs :math:`x_j` can be interpreted as a function
:math:`f: \mathbb{R}^N \rightarrow \mathbb{R}`.
This function can always be expressed by a Fourier-type sum
.. math::
\sum \limits_{\omega_1\in \Omega_1} \dots \sum \limits_{\omega_N \in \Omega_N}
c_{\omega_1,\dots, \omega_N} e^{-i x_1 \omega_1} \dots e^{-i x_N \omega_N}
over the *frequency spectra* :math:`\Omega_j \subseteq \mathbb{R},`
:math:`j=1,\dots,N`. Each spectrum has the property that
:math:`0 \in \Omega_j`, and the spectrum is symmetric
(i.e., for every :math:`\omega \in \Omega_j` we have that :math:`-\omega \in\Omega_j`).
If all frequencies are integer-valued, the Fourier sum becomes a *Fourier series*.
As shown in `Vidal and Theis (2019) <https://arxiv.org/abs/1901.11434>`_ and
`Schuld, Sweke and Meyer (2020) <https://arxiv.org/abs/2008.08605>`_,
if an input :math:`x_j, j = 1 \dots N`,
only enters into single-parameter gates of the form :math:`e^{-i x_j G}`
(where :math:`G` is a Hermitian generator),
the frequency spectrum :math:`\Omega_j` is fully determined by the eigenvalues
of the generators :math:`G`. In many situations, the spectra are limited
to a few frequencies only, which in turn limits the function class that the circuit
can express.
The ``qnode_spectrum`` function computes all frequencies that will
potentially appear in the sets :math:`\Omega_1` to :math:`\Omega_N`.
.. note::
The ``qnode_spectrum`` function also supports
preprocessing of the QNode arguments before they are fed into the gates,
as long as this processing is *linear*. In particular, constant
prefactors for the encoding arguments are allowed.
**Example**
Consider the following example, which uses non-trainable inputs ``x``, ``y`` and ``z``
as well as trainable parameters ``w`` as arguments to the QNode.
.. code-block:: python
n_qubits = 3
dev = qml.device("default.qubit", wires=n_qubits)
@qml.qnode(dev)
def circuit(x, y, z, w):
for i in range(n_qubits):
qml.RX(0.5*x[i], wires=i)
qml.Rot(w[0,i,0], w[0,i,1], w[0,i,2], wires=i)
qml.RY(2.3*y[i], wires=i)
qml.Rot(w[1,i,0], w[1,i,1], w[1,i,2], wires=i)
qml.RX(z, wires=i)
return qml.expval(qml.PauliZ(wires=0))
This circuit looks as follows:
>>> x = np.array([1., 2., 3.])
>>> y = np.array([0.1, 0.3, 0.5])
>>> z = -1.8
>>> w = np.random.random((2, n_qubits, 3))
>>> print(qml.draw(circuit)(x, y, z, w))
0: ββRX(0.5)ββRot(0.598, 0.949, 0.346)βββRY(0.23)ββRot(0.693, 0.0738, 0.246)ββRX(-1.8)βββ€ β¨Zβ©
1: ββRX(1)ββββRot(0.0711, 0.701, 0.445)ββRY(0.69)ββRot(0.32, 0.0482, 0.437)βββRX(-1.8)βββ€
2: ββRX(1.5)ββRot(0.401, 0.0795, 0.731)ββRY(1.15)ββRot(0.756, 0.38, 0.38)βββββRX(-1.8)βββ€
Applying the ``qnode_spectrum`` function to the circuit for
the non-trainable parameters, we obtain:
>>> res = qml.fourier.qnode_spectrum(circuit, argnum=[0, 1, 2])(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"x": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]}
"y": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
"z": {(): [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]}
.. note::
While the Fourier spectrum usually does not depend
on trainable circuit parameters or the actual values of the inputs,
it may still change based on inputs to the QNode that alter the architecture
of the circuit.
.. UsageDetails::
Above, we selected all input-encoding parameters for the spectrum computation, using
the ``argnum`` keyword argument. We may also restrict the full analysis to a single
QNode argument, again using ``argnum``:
>>> res = qml.fourier.qnode_spectrum(circuit, argnum=[0])(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"x": {(0,): [-0.5, 0.0, 0.5], (1,): [-0.5, 0.0, 0.5], (2,): [-0.5, 0.0, 0.5]}
Selecting arguments by name instead of index is possible via the
``encoding_args`` argument:
>>> res = qml.fourier.qnode_spectrum(circuit, encoding_args={"y"})(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"y": {(0,): [-2.3, 0.0, 2.3], (1,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
Note that for array-valued arguments the spectrum for each element of the array
is computed. A more fine-grained control is available by passing index tuples
for the respective argument name in ``encoding_args``:
>>> encoding_args = {"y": [(0,),(2,)]}
>>> res = qml.fourier.qnode_spectrum(circuit, encoding_args=encoding_args)(x, y, z, w)
>>> for inp, freqs in res.items():
... print(f"{inp}: {freqs}")
"y": {(0,): [-2.3, 0.0, 2.3], (2,): [-2.3, 0.0, 2.3]}
.. warning::
The ``qnode_spectrum`` function checks whether the classical preprocessing between
QNode and gate arguments is linear by computing the Jacobian of the processing
and applying :func:`~.pennylane.math.is_independent`. This makes it unlikely
-- *but not impossible* -- that non-linear functions go undetected.
The number of additional points at which the Jacobian is computed in the numerical
test of ``is_independent`` as well as other options for this function
can be controlled via ``validation_kwargs``.
Furthermore, the QNode arguments *not* marked in ``argnum`` will not be
considered in this test and if they resemble encoded inputs, the entire
spectrum might be incorrect or the circuit might not even admit one.
The ``qnode_spectrum`` function works in all interfaces:
.. code-block:: python
import tensorflow as tf
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface='tf')
def circuit(x):
qml.RX(0.4*x[0], wires=0)
qml.PhaseShift(x[1]*np.pi, wires=0)
return qml.expval(qml.PauliZ(wires=0))
x = tf.constant([1., 2.])
res = qml.fourier.qnode_spectrum(circuit)(x)
>>> print(res)
{"x": {(0,): [-0.4, 0.0, 0.4], (1,): [-3.14159, 0.0, 3.14159]}}
Finally, compare ``qnode_spectrum`` with :func:`~.circuit_spectrum`, using
the following circuit.
.. code-block:: python
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev)
def circuit(x, y, z):
qml.RX(0.5*x**2, wires=0, id="x")
qml.RY(2.3*y, wires=1, id="y0")
qml.CNOT(wires=[1,0])
qml.RY(z, wires=0, id="y1")
return qml.expval(qml.PauliZ(wires=0))
First, note that we assigned ``id`` labels to the gates for which we will use
``circuit_spectrum``. This allows us to choose these gates in the computation:
>>> x, y, z = 0.1, 0.2, 0.3
>>> circuit_spec_fn = qml.fourier.circuit_spectrum(circuit, encoding_gates=["x","y0","y1"])
>>> circuit_spec = circuit_spec_fn(x, y, z)
>>> for _id, spec in circuit_spec.items():
... print(f"{_id}: {spec}")
x: [-1.0, 0, 1.0]
y0: [-1.0, 0, 1.0]
y1: [-1.0, 0, 1.0]
As we can see, the preprocessing in the QNode is not included in the simple spectrum.
In contrast, the output of ``qnode_spectrum`` is:
>>> adv_spec = qml.fourier.qnode_spectrum(circuit, encoding_args={"y", "z"})
>>> for _id, spec in adv_spec.items():
... print(f"{_id}: {spec}")
y: {(): [-2.3, 0.0, 2.3]}
z: {(): [-1.0, 0.0, 1.0]}
Note that the values of the output are dictionaries instead of the spectrum lists, that
they include the prefactors introduced by classical preprocessing, and
that we would not be able to compute the advanced spectrum for ``x`` because it is
preprocessed non-linearily in the gate ``qml.RX(0.5*x**2, wires=0, id="x")``.
"""
# pylint: disable=too-many-branches,protected-access
validation_kwargs = validation_kwargs or {}
encoding_args, argnum = _process_ids(encoding_args, argnum, qnode)
atol = 10 ** (-decimals) if decimals is not None else 1e-10
# A map between Jacobian indices (contiguous) and arg names (may be discontiguous)
arg_name_map = dict(enumerate(encoding_args))
jac_fn = qml.transforms.classical_jacobian(
qnode, argnum=argnum, expand_fn=qml.transforms.expand_multi_par_and_no_gen
)
@wraps(qnode)
def wrapper(*args, **kwargs):
# Compute classical Jacobian and assert preprocessing is linear
if not qml.math.is_independent(jac_fn, qnode.interface, args, kwargs, **validation_kwargs):
raise ValueError(
"The Jacobian of the classical preprocessing in the provided QNode "
"is not constant; only linear classical preprocessing is supported."
)
# After construction, check whether invalid operations (for a spectrum)
# are present in the QNode
for m in qnode.qtape.measurements:
if m.return_type not in {qml.operation.Expectation, qml.operation.Probability}:
raise ValueError(
f"The return_type {m.return_type.value} is not supported as it likely does "
"not admit a Fourier spectrum."
)
class_jacs = jac_fn(*args, **kwargs)
spectra = {}
tape = qml.transforms.tape_expand.expand_multi_par_and_no_gen(qnode.qtape)
par_info = tape._par_info
# Iterate over jacobians per argument
for jac_idx, class_jac in enumerate(class_jacs):
# Obtain argument name for the jacobian index
arg_name = arg_name_map[jac_idx]
# Extract requested parameter indices for the current argument
if encoding_args[arg_name] is Ellipsis:
# If no index for this argument is specified, request all parameters within
# the argument (Recall () is a valid index for scalar-valued arguments here)
requested_par_ids = set(product(*(range(sh) for sh in class_jac.shape[1:])))
else:
requested_par_ids = set(encoding_args[arg_name])
# Each requested parameter at least "contributes" as a constant
_spectra = {par_idx: {0} for par_idx in requested_par_ids}
# Iterate over the axis of the current Jacobian that corresponds to the tape operations
for op_idx, jac_of_op in enumerate(np.round(class_jac, decimals=decimals)):
op = par_info[op_idx]["op"]
# Find parameters that both were requested and feed into the operation
if len(class_jac.shape) == 1:
# Scalar argument, only axis of Jacobian is for operations
if np.isclose(jac_of_op, 0.0, atol=atol, rtol=0):
continue
jac_of_op = {(): jac_of_op}
par_ids = {()}
else:
# Array-valued argument
# Extract indices of parameters contributing to the current operation
par_ids = zip(*[map(int, _ids) for _ids in np.where(jac_of_op)])
# Exclude contributing parameters that were not requested
par_ids = set(par_ids).intersection(requested_par_ids)
if len(par_ids) == 0:
continue
# Multi-parameter gates are not supported (we expanded the tape already)
if len(op.parameters) != 1:
raise ValueError(
"Can only consider one-parameter gates as data-encoding gates; "
f"got {op.name}."
)
spec = get_spectrum(op, decimals=decimals)
# For each contributing parameter, rescale the operation's spectrum
# and add it to the spectrum for that parameter
for par_idx in par_ids:
scale = float(qml.math.abs(jac_of_op[par_idx]))
scaled_spec = [scale * f for f in spec]
_spectra[par_idx] = join_spectra(_spectra[par_idx], scaled_spec)
# Construct the sorted spectrum also containing negative frequencies
for idx, spec in _spectra.items():
spec = sorted(spec)
_spectra[idx] = [-freq for freq in spec[:0:-1]] + spec
spectra[arg_name] = _spectra
return spectra
return wrapper
|
40,596 | def voronoi_partition_pts(points, outline, no_multipolygons=False):
"""
Compute the polygons of a voronoi partition of `points` within the
polygon `outline`. Taken from
https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py
Attributes
----------
points : Nx2 - ndarray[dtype=float]
outline : Polygon
no_multipolygons : bool (default: False)
If true, replace each MultiPolygon by its largest component
Returns
-------
polygons : N - ndarray[dtype=Polygon|MultiPolygon]
"""
points = np.asarray(points)
if len(points) == 1:
polygons = [outline]
else:
xmin, ymin = np.amin(points, axis=0)
xmax, ymax = np.amax(points, axis=0)
xspan = xmax - xmin
yspan = ymax - ymin
# to avoid any network positions outside all Voronoi cells, append
# the corners of a rectangle framing these points
vor = Voronoi(np.vstack((points,
[[xmin-3.*xspan, ymin-3.*yspan],
[xmin-3.*xspan, ymax+3.*yspan],
[xmax+3.*xspan, ymin-3.*yspan],
[xmax+3.*xspan, ymax+3.*yspan]])))
polygons = []
for i in range(len(points)):
poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]])
if not poly.is_valid:
poly = poly.buffer(0)
poly = poly.intersection(outline)
polygons.append(poly)
if no_multipolygons:
def demultipolygon(poly):
try:
# for a MultiPolygon pick the part with the largest area
poly = max(poly.geoms, key=lambda pg: pg.area)
except:
pass
return poly
polygons = [demultipolygon(poly) for poly in polygons]
polygons_arr = np.empty((len(polygons),), 'object')
polygons_arr[:] = polygons
return polygons_arr
| def voronoi_partition_pts(points, outline, no_multipolygons=False):
"""
Compute the polygons of a voronoi partition of `points` within the
polygon `outline`. Taken from
https://github.com/FRESNA/vresutils/blob/master/vresutils/graph.py
Attributes
----------
points : Nx2 - ndarray[dtype=float]
outline : Polygon
no_multipolygons : bool (default: False)
If true, replace each MultiPolygon by its largest component
Returns
-------
polygons : N - ndarray[dtype=Polygon|MultiPolygon]
"""
points = np.asarray(points)
if len(points) == 1:
polygons = [outline]
else:
xmin, ymin = np.amin(points, axis=0)
xmax, ymax = np.amax(points, axis=0)
xspan = xmax - xmin
yspan = ymax - ymin
# to avoid any network positions outside all Voronoi cells, append
# the corners of a rectangle framing these points
vor = Voronoi(np.vstack((points,
[[xmin-3.*xspan, ymin-3.*yspan],
[xmin-3.*xspan, ymax+3.*yspan],
[xmax+3.*xspan, ymin-3.*yspan],
[xmax+3.*xspan, ymax+3.*yspan]])))
polygons = []
for i in range(len(points)):
poly = Polygon(vor.vertices[vor.regions[vor.point_region[i]]])
if not poly.is_valid:
poly = poly.buffer(0)
poly = poly.intersection(outline)
polygons.append(poly)
if no_multipolygons:
def demultipolygon(poly):
try:
# for a MultiPolygon pick the part with the largest area
poly = max(poly.geoms, key=lambda pg: pg.area)
except:
pass
return poly
polygons = [demultipolygon(poly) for poly in polygons]
return np.array(polygons, dtype=object)
|
57,822 | def email_command(client, args, email_suspicious_score_threshold, email_malicious_score_threshold, reliability):
emails = argToList(args.get("email"), ",")
results = []
for email in emails:
result = client.get_email_reputation(email)
result['address'] = email
human_readable = tableToMarkdown(f"IPQualityScore Results for {email}", result, result.keys())
if result.get('fraud_score', 0) >= email_malicious_score_threshold:
score = 3
result['Malicious'] = {'Vendor': 'IPQualityScore'}
elif result.get('fraud_score', 0) >= email_suspicious_score_threshold:
score = 2
else:
score = 0
reputation = Common.DBotScore(
indicator=email,
indicator_type=DBotScoreType.EMAIL,
score=score,
integration_name='IPQualityScore',
reliability=get_reputation_reliability(reliability)
)
ip_context = Common.EMAIL(
address=email,
dbot_score=reputation,
domain=result.get('sanitized_email', email).split("@")[-1]
)
results.append(CommandResults(
readable_output=human_readable,
indicator=ip_context,
outputs_prefix='IPQualityScore.Email',
outputs_key_field='address',
outputs=result,
raw_response=result))
return results
| def email_command(client, args, email_suspicious_score_threshold, email_malicious_score_threshold, reliability):
emails = argToList(args.get("email"), ",")
results = []
for email in emails:
result = client.get_email_reputation(email)
result['address'] = email
human_readable = tableToMarkdown(f"IPQualityScore Results for {email}", result, result.keys())
if result.get('fraud_score', 0) >= email_malicious_score_threshold:
score = 3
result['Malicious'] = {'Vendor': 'IPQualityScore'}
elif result.get('fraud_score', 0) >= email_suspicious_score_threshold:
score = 2
else:
score = 0
reputation = Common.DBotScore(
indicator=email,
indicator_type=DBotScoreType.EMAIL,
score=score,
integration_name='IPQualityScore',
reliability=get_reputation_reliability(reliability)
)
email_context = Common.EMAIL(
address=email,
dbot_score=reputation,
domain=result.get('sanitized_email', email).split("@")[-1]
)
results.append(CommandResults(
readable_output=human_readable,
indicator=email_context,
outputs_prefix='IPQualityScore.Email',
outputs_key_field='address',
outputs=result,
raw_response=result))
return results
|
43,936 | def nuclear_attraction(la, lb, ra, rb, alpha, beta, r):
r"""Compute nuclear attraction integral between primitive Gaussian functions.
The nuclear attraction integral between two Gaussian functions denoted by :math:`a` and
:math:`b` can be computed as
[`Helgaker (1995) p820 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
V_{ab} = \frac{2\pi}{p} \sum_{tuv} E_t^{ij} E_u^{kl} E_v^{mn} R_{tuv},
where :math:`E` and :math:`R` represent the Hermite Gaussian expansion coefficient and the
Hermite Coulomb integral, respectively. The sum goes over :math:`i + j + 1`, :math:`k + l + 1`
and :math:`m + m + 1` for :math:`t`, :math:`u` and :math:`v`, respectively and :math:`p` is
computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`.
Args:
la (tuple[int]): angular momentum for the first Gaussian function
lb (tuple[int]): angular momentum for the second Gaussian function
ra (array[float]): position vector of the the first Gaussian function
rb (array[float]): position vector of the the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
r (array[float]): position vector of nucleus
Returns:
array[float]: nuclear attraction integral between two Gaussian functions
"""
l1, m1, n1 = la
l2, m2, n2 = lb
p = alpha + beta
rgp = (alpha * ra[:, anp.newaxis, anp.newaxis] + beta * rb[:, anp.newaxis, anp.newaxis]) / (
alpha + beta
)
dr = rgp - anp.array(r)[:, anp.newaxis, anp.newaxis]
a = 0.0
for t in range(l1 + l2 + 1):
for u in range(m1 + m2 + 1):
for v in range(n1 + n2 + 1):
a = a + expansion(l1, l2, ra[0], rb[0], alpha, beta, t) * expansion(
m1, m2, ra[1], rb[1], alpha, beta, u
) * expansion(n1, n2, ra[2], rb[2], alpha, beta, v) * _hermite_coulomb(
t, u, v, 0, p, dr
)
a = a * 2 * anp.pi / p
return a
| def nuclear_attraction(la, lb, ra, rb, alpha, beta, r):
r"""Compute nuclear attraction integral between primitive Gaussian functions.
The nuclear attraction integral between two Gaussian functions denoted by :math:`a` and
:math:`b` can be computed as
[`Helgaker (1995) p820 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
V_{ab} = \frac{2\pi}{p} \sum_{tuv} E_t^{ij} E_u^{kl} E_v^{mn} R_{tuv},
where :math:`E` and :math:`R` represent the Hermite Gaussian expansion coefficient and the
Hermite Coulomb integral, respectively. The sum goes over :math:`i + j + 1`, :math:`k + l + 1`
and :math:`m + m + 1` for :math:`t`, :math:`u` and :math:`v`, respectively, and :math:`p` is
computed from the exponents of the two Gaussian functions as :math:`p = \alpha + \beta`.
Args:
la (tuple[int]): angular momentum for the first Gaussian function
lb (tuple[int]): angular momentum for the second Gaussian function
ra (array[float]): position vector of the the first Gaussian function
rb (array[float]): position vector of the the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
r (array[float]): position vector of nucleus
Returns:
array[float]: nuclear attraction integral between two Gaussian functions
"""
l1, m1, n1 = la
l2, m2, n2 = lb
p = alpha + beta
rgp = (alpha * ra[:, anp.newaxis, anp.newaxis] + beta * rb[:, anp.newaxis, anp.newaxis]) / (
alpha + beta
)
dr = rgp - anp.array(r)[:, anp.newaxis, anp.newaxis]
a = 0.0
for t in range(l1 + l2 + 1):
for u in range(m1 + m2 + 1):
for v in range(n1 + n2 + 1):
a = a + expansion(l1, l2, ra[0], rb[0], alpha, beta, t) * expansion(
m1, m2, ra[1], rb[1], alpha, beta, u
) * expansion(n1, n2, ra[2], rb[2], alpha, beta, v) * _hermite_coulomb(
t, u, v, 0, p, dr
)
a = a * 2 * anp.pi / p
return a
|
36,193 | def language_code_to_iso_3166(language):
"""Turn a language name (en-us) into an ISO 3166 format (en-US)."""
language, _, country = language.lower().partition('-')
if country:
return language + '-' + country.upper()
return language
| def language_code_to_iso_3166(language):
"""Turn a language name (en-us) into an ISO 3166 format (en-US)."""
language, _, country = language.lower().partition('-')
if country:
return f'{language}-{country.upper()}'
return language
|
27,779 | def pytest_collect_file(
fspath: Path, path: py.path.local, parent: Collector,
) -> Optional[Union["DoctestModule", "DoctestTextfile"]]:
config = parent.config
if fspath.suffix == ".py":
if config.option.doctestmodules and not _is_setup_py(fspath):
mod: DoctestModule = DoctestModule.from_parent(parent, fspath=path)
return mod
elif _is_doctest(config, fspath, parent):
txt: DoctestTextfile = DoctestTextfile.from_parent(parent, fspath=path)
return txt
return None
| def pytest_collect_file(
fspath: Path, parent: Collector
) -> Optional[Union["DoctestModule", "DoctestTextfile"]]:
config = parent.config
if fspath.suffix == ".py":
if config.option.doctestmodules and not _is_setup_py(fspath):
mod: DoctestModule = DoctestModule.from_parent(parent, fspath=path)
return mod
elif _is_doctest(config, fspath, parent):
txt: DoctestTextfile = DoctestTextfile.from_parent(parent, fspath=path)
return txt
return None
|
14,113 | def test_fsspec_url():
fsspec = pytest.importorskip("fsspec")
import fsspec.implementations.memory
class MyMemoryFileSystem(fsspec.implementations.memory.MemoryFileSystem):
# Simple fsspec filesystem that adds a required keyword.
# Attempting to use this filesystem without the keyword will raise an exception.
def __init__(self, is_set, *args, **kwargs):
self.is_set = is_set
super().__init__(*args, **kwargs)
fsspec.register_implementation("memory", MyMemoryFileSystem, clobber=True)
memfs = MyMemoryFileSystem(is_set=True)
test_dataset = "naturalearth_lowres"
df = GeoDataFrame(read_file(get_path(test_dataset)))
with memfs.open("data.parquet", "wb") as f:
df.to_parquet(f)
result = read_parquet("memory://data.parquet", storage_options=dict(is_set=True))
assert_geodataframe_equal(result, df)
| def test_fsspec_url():
fsspec = pytest.importorskip("fsspec")
import fsspec.implementations.memory
class MyMemoryFileSystem(fsspec.implementations.memory.MemoryFileSystem):
# Simple fsspec filesystem that adds a required keyword.
# Attempting to use this filesystem without the keyword will raise an exception.
def __init__(self, is_set, *args, **kwargs):
self.is_set = is_set
super().__init__(*args, **kwargs)
fsspec.register_implementation("memory", MyMemoryFileSystem, clobber=True)
memfs = MyMemoryFileSystem(is_set=True)
test_dataset = "naturalearth_lowres"
df = read_file(get_path(test_dataset))
with memfs.open("data.parquet", "wb") as f:
df.to_parquet(f)
result = read_parquet("memory://data.parquet", storage_options=dict(is_set=True))
assert_geodataframe_equal(result, df)
|
Subsets and Splits