id (int64, 11-59.9k) | original (string, lengths 33-150k) | modified (string, lengths 37-150k)
---|---|---
23,022 |
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
)
ps1 = pdf1.A
ps2 = pdf2.A
ps3 = pd.Series(np.random.randn(10), index=list("ABCDXabcde"))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({("s", 0): 4}, "s", "i8")
for l, r, el, er in [
(ddf1, ddf2, pdf1, pdf2),
(ds1, ds2, ps1, ps2),
(ddf1.repartition(["a", "f", "j"]), ddf2, pdf1, pdf2),
(ds1.repartition(["a", "b", "f", "j"]), ds2, ps1, ps2),
(ddf1, ddf2.repartition(["a", "k"]), pdf1, pdf2),
(ds1, ds2.repartition(["a", "b", "d", "h", "k"]), ps1, ps2),
(ddf1, 3, pdf1, 3),
(ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4),
(ds1, s, ps1, 4),
]:
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert_eq(l, el)
assert_eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert_eq(l.add(r, axis=0), el.add(er, axis=0))
assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert_eq(l.div(r, axis=0), el.div(er, axis=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
pytest.raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
assert_eq(l, el)
assert_eq(r, er)
for axis in [0, 1, "index", "columns"]:
assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
with warnings.catch_warnings():
# https://github.com/pandas-dev/pandas/issues/26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
|
def test_frame_series_arithmetic_methods():
pdf1 = pd.DataFrame(
{
"A": np.arange(10),
"B": [np.nan, 1, 2, 3, 4] * 2,
"C": [np.nan] * 10,
"D": np.arange(10),
},
index=list("abcdefghij"),
columns=list("ABCD"),
)
pdf2 = pd.DataFrame(
np.random.randn(10, 4), index=list("abcdefghjk"), columns=list("ABCX")
)
ps1 = pdf1.A
ps2 = pdf2.A
ps3 = pd.Series(np.random.randn(10), index=list("ABCDXabcde"))
ddf1 = dd.from_pandas(pdf1, 2)
ddf2 = dd.from_pandas(pdf2, 2)
ds1 = ddf1.A
ds2 = ddf2.A
s = dd.core.Scalar({("s", 0): 4}, "s", "i8")
for l, r, el, er in [
(ddf1, ddf2, pdf1, pdf2),
(ds1, ds2, ps1, ps2),
(ddf1.repartition(["a", "f", "j"]), ddf2, pdf1, pdf2),
(ds1.repartition(["a", "b", "f", "j"]), ds2, ps1, ps2),
(ddf1, ddf2.repartition(["a", "k"]), pdf1, pdf2),
(ds1, ds2.repartition(["a", "b", "d", "h", "k"]), ps1, ps2),
(ddf1, 3, pdf1, 3),
(ds1, 3, ps1, 3),
(ddf1, s, pdf1, 4),
(ds1, s, ps1, 4),
]:
# l, r may be repartitioned, test whether repartition keeps original data
assert_eq(l, el)
assert_eq(r, er)
assert_eq(l.add(r, fill_value=0), el.add(er, fill_value=0))
assert_eq(l.sub(r, fill_value=0), el.sub(er, fill_value=0))
assert_eq(l.mul(r, fill_value=0), el.mul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.div(r, fill_value=0), el.div(er, fill_value=0))
assert_eq(l.divide(r, fill_value=0), el.divide(er, fill_value=0))
assert_eq(l.truediv(r, fill_value=0), el.truediv(er, fill_value=0))
assert_eq(l.floordiv(r, fill_value=1), el.floordiv(er, fill_value=1))
assert_eq(l.pow(r, fill_value=0), el.pow(er, fill_value=0))
assert_eq(l.mod(r, fill_value=0), el.mod(er, fill_value=0))
assert_eq(l.radd(r, fill_value=0), el.radd(er, fill_value=0))
assert_eq(l.rsub(r, fill_value=0), el.rsub(er, fill_value=0))
assert_eq(l.rmul(r, fill_value=0), el.rmul(er, fill_value=0))
with warnings.catch_warnings():
# pandas-26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.rdiv(r, fill_value=0), el.rdiv(er, fill_value=0))
assert_eq(l.rtruediv(r, fill_value=0), el.rtruediv(er, fill_value=0))
assert_eq(l.rfloordiv(r, fill_value=1), el.rfloordiv(er, fill_value=1))
assert_eq(l.rpow(r, fill_value=0), el.rpow(er, fill_value=0))
assert_eq(l.rmod(r, fill_value=0), el.rmod(er, fill_value=0))
for l, r, el, er in [(ddf1, ds2, pdf1, ps2), (ddf1, ddf2.X, pdf1, pdf2.X)]:
assert_eq(l, el)
assert_eq(r, er)
# must specify axis=0 to add Series to each column
# axis=1 is not supported (add to each row)
assert_eq(l.add(r, axis=0), el.add(er, axis=0))
assert_eq(l.sub(r, axis=0), el.sub(er, axis=0))
assert_eq(l.mul(r, axis=0), el.mul(er, axis=0))
assert_eq(l.div(r, axis=0), el.div(er, axis=0))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=0), el.truediv(er, axis=0))
assert_eq(l.floordiv(r, axis=0), el.floordiv(er, axis=0))
assert_eq(l.mod(r, axis=0), el.mod(er, axis=0))
assert_eq(l.pow(r, axis=0), el.pow(er, axis=0))
assert_eq(l.radd(r, axis=0), el.radd(er, axis=0))
assert_eq(l.rsub(r, axis=0), el.rsub(er, axis=0))
assert_eq(l.rmul(r, axis=0), el.rmul(er, axis=0))
assert_eq(l.rdiv(r, axis=0), el.rdiv(er, axis=0))
assert_eq(l.rtruediv(r, axis=0), el.rtruediv(er, axis=0))
assert_eq(l.rfloordiv(r, axis=0), el.rfloordiv(er, axis=0))
assert_eq(l.rmod(r, axis=0), el.rmod(er, axis=0))
assert_eq(l.rpow(r, axis=0), el.rpow(er, axis=0))
pytest.raises(ValueError, lambda: l.add(r, axis=1))
for l, r, el, er in [(ddf1, pdf2, pdf1, pdf2), (ddf1, ps3, pdf1, ps3)]:
assert_eq(l, el)
assert_eq(r, er)
for axis in [0, 1, "index", "columns"]:
assert_eq(l.add(r, axis=axis), el.add(er, axis=axis))
assert_eq(l.sub(r, axis=axis), el.sub(er, axis=axis))
assert_eq(l.mul(r, axis=axis), el.mul(er, axis=axis))
assert_eq(l.div(r, axis=axis), el.div(er, axis=axis))
assert_eq(l.divide(r, axis=0), el.divide(er, axis=0))
assert_eq(l.truediv(r, axis=axis), el.truediv(er, axis=axis))
with warnings.catch_warnings():
# https://github.com/pandas-dev/pandas/issues/26793
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(l.floordiv(r, axis=axis), el.floordiv(er, axis=axis))
assert_eq(l.mod(r, axis=axis), el.mod(er, axis=axis))
assert_eq(l.pow(r, axis=axis), el.pow(er, axis=axis))
assert_eq(l.rdiv(r, axis=axis), el.rdiv(er, axis=axis))
assert_eq(l.rtruediv(r, axis=axis), el.rtruediv(er, axis=axis))
assert_eq(l.rfloordiv(r, axis=axis), el.rfloordiv(er, axis=axis))
assert_eq(l.rpow(r, axis=axis), el.rpow(er, axis=axis))
assert_eq(l.rmod(r, axis=axis), el.rmod(er, axis=axis))
assert_eq(l.radd(r, axis=axis), el.radd(er, axis=axis))
assert_eq(l.rsub(r, axis=axis), el.rsub(er, axis=axis))
assert_eq(l.rmul(r, axis=axis), el.rmul(er, axis=axis))
|
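Editor's note: the row above exercises dask's pandas-style arithmetic methods with `fill_value`. A minimal sketch (assuming dask and pandas are installed) of the semantics being tested: NaN cells are filled before the operation, and the lazy dask result matches the eager pandas one.

```python
# Minimal sketch (assumes dask and pandas are installed): the fill_value
# arithmetic exercised in the test above mirrors plain pandas semantics.
import numpy as np
import pandas as pd
import dask.dataframe as dd

pdf = pd.DataFrame({"A": [1.0, np.nan, 3.0]}, index=list("abc"))
ddf = dd.from_pandas(pdf, npartitions=2)

expected = pdf.add(1, fill_value=0)          # NaN is treated as 0 before adding
result = ddf.add(1, fill_value=0).compute()  # same operation, evaluated lazily
assert result.equals(expected)
```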
23,076 |
def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=[],
new_axis=None,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that this function will attempt to automatically determine the output
array type before computing it; please refer to the ``meta`` keyword argument
below if you expect that the function will not succeed when operating on 0-d
arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified it is expected to be an
array of the same type of that returned when calling ``.compute()`` on
the array returned by this function. When not provided, will be inferred
by applying the function to a small set of fake data, usually a 0-d array.
It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it. If the
output type is known beforehand (e.g., ``np.ndarray``, ``cupy.ndarray``),
an empty array of such type can be passed, for example: ``meta=np.array(())``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function gets information about where it is in the array by
accepting a special ``block_info`` keyword argument.
>>> def func(block, block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
For each argument and keyword arguments that are dask arrays (the positions
of which are the first index), you will receive the shape of the full
array, the number of chunks of the full array in each dimension, the chunk
location (for example the fourth chunk over in the first dimension), and
the array location (for example the slice corresponding to ``40:50``). The
same information is provided for the output, with the key ``None``, plus
the shape and dtype that should be returned.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP
dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``:
>>> rs = da.random.RandomState(RandomState=cupy.random.RandomState)
>>> da.map_blocks(lambda x: x[2], rs.random(5), meta=cupy.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=cupy.ndarray>
"""
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn("The token= keyword to map_blocks has been moved to name=")
name = token
name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
"Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(out_ind))
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
|
def map_blocks(
func,
*args,
name=None,
token=None,
dtype=None,
chunks=None,
drop_axis=[],
new_axis=None,
meta=None,
**kwargs,
):
"""Map a function across all blocks of a dask array.
Note that this function will attempt to automatically determine the output
array type before computing it; please refer to the ``meta`` keyword argument
below if you expect that the function will not succeed when operating on 0-d
arrays.
Parameters
----------
func : callable
Function to apply to every block in the array.
args : dask arrays or other objects
dtype : np.dtype, optional
The ``dtype`` of the output array. It is recommended to provide this.
If not provided, will be inferred by applying the function to a small
set of fake data.
chunks : tuple, optional
Chunk shape of resulting blocks if the function does not preserve
shape. If not provided, the resulting array is assumed to have the same
block structure as the first input array.
drop_axis : number or iterable, optional
Dimensions lost by the function.
new_axis : number or iterable, optional
New dimensions created by the function. Note that these are applied
after ``drop_axis`` (if present).
token : string, optional
The key prefix to use for the output array. If not provided, will be
determined from the function name.
name : string, optional
The key name to use for the output array. Note that this fully
specifies the output key name, and must be unique. If not provided,
will be determined by a hash of the arguments.
meta : array-like, optional
The ``meta`` of the output array, when specified is expected to be an
array of the same type of that returned when calling ``.compute()`` on
the array returned by this function. When not provided, will be inferred
by applying the function to a small set of fake data, usually a 0-d array.
It's important to ensure that ``func`` can successfully complete
computation without raising exceptions when 0-d is passed to it. If the
output type is known beforehand (e.g., ``np.ndarray``, ``cupy.ndarray``),
an empty array of such type can be passed, for example: ``meta=np.array(())``.
**kwargs :
Other keyword arguments to pass to function. Values must be constants
(not dask.arrays)
See Also
--------
dask.array.blockwise : Generalized operation with control over block alignment.
Examples
--------
>>> import dask.array as da
>>> x = da.arange(6, chunks=3)
>>> x.map_blocks(lambda x: x * 2).compute()
array([ 0, 2, 4, 6, 8, 10])
The ``da.map_blocks`` function can also accept multiple arrays.
>>> d = da.arange(5, chunks=2)
>>> e = da.arange(5, chunks=2)
>>> f = da.map_blocks(lambda a, b: a + b**2, d, e)
>>> f.compute()
array([ 0, 2, 6, 12, 20])
If the function changes shape of the blocks then you must provide chunks
explicitly.
>>> y = x.map_blocks(lambda x: x[::2], chunks=((2, 2),))
You have a bit of freedom in specifying chunks. If all of the output chunk
sizes are the same, you can provide just that chunk size as a single tuple.
>>> a = da.arange(18, chunks=(6,))
>>> b = a.map_blocks(lambda x: x[:3], chunks=(3,))
If the function changes the dimension of the blocks you must specify the
created or destroyed dimensions.
>>> b = a.map_blocks(lambda x: x[None, :, None], chunks=(1, 6, 1),
... new_axis=[0, 2])
If ``chunks`` is specified but ``new_axis`` is not, then it is inferred to
add the necessary number of axes on the left.
Map_blocks aligns blocks by block positions without regard to shape. In the
following example we have two arrays with the same number of blocks but
with different shape and chunk sizes.
>>> x = da.arange(1000, chunks=(100,))
>>> y = da.arange(100, chunks=(10,))
The relevant attribute to match is numblocks.
>>> x.numblocks
(10,)
>>> y.numblocks
(10,)
If these match (up to broadcasting rules) then we can map arbitrary
functions across blocks
>>> def func(a, b):
... return np.array([a.max(), b.max()])
>>> da.map_blocks(func, x, y, chunks=(2,), dtype='i8')
dask.array<func, shape=(20,), dtype=int64, chunksize=(2,), chunktype=numpy.ndarray>
>>> _.compute()
array([ 99, 9, 199, 19, 299, 29, 399, 39, 499, 49, 599, 59, 699,
69, 799, 79, 899, 89, 999, 99])
Your block function gets information about where it is in the array by
accepting a special ``block_info`` keyword argument.
>>> def func(block, block_info=None):
... pass
This will receive the following information:
>>> block_info # doctest: +SKIP
{0: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)]},
None: {'shape': (1000,),
'num-chunks': (10,),
'chunk-location': (4,),
'array-location': [(400, 500)],
'chunk-shape': (100,),
'dtype': dtype('float64')}}
For each argument and keyword arguments that are dask arrays (the positions
of which are the first index), you will receive the shape of the full
array, the number of chunks of the full array in each dimension, the chunk
location (for example the fourth chunk over in the first dimension), and
the array location (for example the slice corresponding to ``40:50``). The
same information is provided for the output, with the key ``None``, plus
the shape and dtype that should be returned.
These features can be combined to synthesize an array from scratch, for
example:
>>> def func(block_info=None):
... loc = block_info[None]['array-location'][0]
... return np.arange(loc[0], loc[1])
>>> da.map_blocks(func, chunks=((4, 4),), dtype=np.float_)
dask.array<func, shape=(8,), dtype=float64, chunksize=(4,), chunktype=numpy.ndarray>
>>> _.compute()
array([0, 1, 2, 3, 4, 5, 6, 7])
You may specify the key name prefix of the resulting task in the graph with
the optional ``token`` keyword argument.
>>> x.map_blocks(lambda x: x + 1, name='increment') # doctest: +SKIP
dask.array<increment, shape=(100,), dtype=int64, chunksize=(10,), chunktype=numpy.ndarray>
For functions that may not handle 0-d arrays, it's also possible to specify
``meta`` with an empty array matching the type of the expected result. In
the example below, ``func`` will result in an ``IndexError`` when computing
``meta``:
>>> da.map_blocks(lambda x: x[2], da.random.random(5), meta=np.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=numpy.ndarray>
Similarly, it's possible to specify a non-NumPy array to ``meta``:
>>> rs = da.random.RandomState(RandomState=cupy.random.RandomState)
>>> da.map_blocks(lambda x: x[2], rs.random(5), meta=cupy.array(()))
dask.array<lambda, shape=(5,), dtype=float64, chunksize=(5,), chunktype=cupy.ndarray>
"""
if not callable(func):
msg = (
"First argument must be callable function, not %s\n"
"Usage: da.map_blocks(function, x)\n"
" or: da.map_blocks(function, x, y, z)"
)
raise TypeError(msg % type(func).__name__)
if token:
warnings.warn("The token= keyword to map_blocks has been moved to name=")
name = token
name = "%s-%s" % (name or funcname(func), tokenize(func, *args, **kwargs))
new_axes = {}
if isinstance(drop_axis, Number):
drop_axis = [drop_axis]
if isinstance(new_axis, Number):
new_axis = [new_axis] # TODO: handle new_axis
arrs = [a for a in args if isinstance(a, Array)]
argpairs = [
(a, tuple(range(a.ndim))[::-1]) if isinstance(a, Array) else (a, None)
for a in args
]
if arrs:
out_ind = tuple(range(max(a.ndim for a in arrs)))[::-1]
else:
out_ind = ()
original_kwargs = kwargs
if dtype is None and meta is None:
try:
meta = compute_meta(func, dtype, *args, **kwargs)
except Exception:
pass
dtype = apply_infer_dtype(func, args, original_kwargs, "map_blocks")
if drop_axis:
out_ind = tuple(x for i, x in enumerate(out_ind) if i not in drop_axis)
if new_axis is None and chunks is not None and len(out_ind) < len(chunks):
new_axis = range(len(chunks) - len(out_ind))
if new_axis:
# new_axis = [x + len(drop_axis) for x in new_axis]
out_ind = list(out_ind)
for ax in sorted(new_axis):
n = len(out_ind) + len(drop_axis)
out_ind.insert(ax, n)
if chunks is not None:
new_axes[n] = chunks[ax]
else:
new_axes[n] = 1
out_ind = tuple(out_ind)
if max(new_axis) > max(out_ind):
raise ValueError("New_axis values do not fill in all dimensions")
if chunks is not None:
if len(chunks) != len(out_ind):
raise ValueError(
"Provided chunks have {0} dims, expected {1} "
"dims.".format(len(chunks), len(out_ind))
)
adjust_chunks = dict(zip(out_ind, chunks))
else:
adjust_chunks = None
out = blockwise(
func,
out_ind,
*concat(argpairs),
name=name,
new_axes=new_axes,
dtype=dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=adjust_chunks,
meta=meta,
**kwargs,
)
extra_argpairs = []
extra_names = []
# If func has block_id as an argument, construct an array of block IDs and
# prepare to inject it.
if has_keyword(func, "block_id"):
block_id_name = "block-id-" + out.name
block_id_dsk = {
(block_id_name,) + block_id: block_id
for block_id in product(*(range(len(c)) for c in out.chunks))
}
block_id_array = Array(
block_id_dsk,
block_id_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_id_array, out_ind))
extra_names.append("block_id")
# If func has block_info as an argument, construct an array of block info
# objects and prepare to inject it.
if has_keyword(func, "block_info"):
starts = {}
num_chunks = {}
shapes = {}
for i, (arg, in_ind) in enumerate(argpairs):
if in_ind is not None:
shapes[i] = arg.shape
if drop_axis:
# We concatenate along dropped axes, so we need to treat them
# as if there is only a single chunk.
starts[i] = [
(
cached_cumsum(arg.chunks[j], initial_zero=True)
if ind in out_ind
else [0, arg.shape[j]]
)
for j, ind in enumerate(in_ind)
]
num_chunks[i] = tuple(len(s) - 1 for s in starts[i])
else:
starts[i] = [
cached_cumsum(c, initial_zero=True) for c in arg.chunks
]
num_chunks[i] = arg.numblocks
out_starts = [cached_cumsum(c, initial_zero=True) for c in out.chunks]
block_info_name = "block-info-" + out.name
block_info_dsk = {}
for block_id in product(*(range(len(c)) for c in out.chunks)):
# Get position of chunk, indexed by axis labels
location = {out_ind[i]: loc for i, loc in enumerate(block_id)}
info = {}
for i, shape in shapes.items():
# Compute chunk key in the array, taking broadcasting into
# account. We don't directly know which dimensions are
# broadcast, but any dimension with only one chunk can be
# treated as broadcast.
arr_k = tuple(
location.get(ind, 0) if num_chunks[i][j] > 1 else 0
for j, ind in enumerate(argpairs[i][1])
)
info[i] = {
"shape": shape,
"num-chunks": num_chunks[i],
"array-location": [
(starts[i][ij][j], starts[i][ij][j + 1])
for ij, j in enumerate(arr_k)
],
"chunk-location": arr_k,
}
info[None] = {
"shape": out.shape,
"num-chunks": out.numblocks,
"array-location": [
(out_starts[ij][j], out_starts[ij][j + 1])
for ij, j in enumerate(block_id)
],
"chunk-location": block_id,
"chunk-shape": tuple(
out.chunks[ij][j] for ij, j in enumerate(block_id)
),
"dtype": dtype,
}
block_info_dsk[(block_info_name,) + block_id] = info
block_info = Array(
block_info_dsk,
block_info_name,
chunks=tuple((1,) * len(c) for c in out.chunks),
dtype=np.object_,
)
extra_argpairs.append((block_info, out_ind))
extra_names.append("block_info")
if extra_argpairs:
# Rewrite the Blockwise layer. It would be nice to find a way to
# avoid doing it twice, but it's currently needed to determine
# out.chunks from the first pass. Since it constructs a Blockwise
# rather than an expanded graph, it shouldn't be too expensive.
out = blockwise(
_pass_extra_kwargs,
out_ind,
func,
None,
tuple(extra_names),
None,
*concat(extra_argpairs),
*concat(argpairs),
name=out.name,
dtype=out.dtype,
concatenate=True,
align_arrays=False,
adjust_chunks=dict(zip(out_ind, out.chunks)),
meta=meta,
**kwargs,
)
return out
|
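Editor's note: the `map_blocks` implementation above also injects a `block_id` tuple when the mapped function accepts a ``block_id`` keyword (the `has_keyword(func, "block_id")` branch). A minimal sketch of that behaviour, assuming dask and numpy are available:

```python
# Minimal sketch: a function accepting ``block_id`` receives its block index,
# as handled by the has_keyword(func, "block_id") branch above.
import dask.array as da

def add_block_index(x, block_id=None):
    # block_id is (0,), (1,), ... for a 1-D array
    return x + block_id[0]

x = da.zeros(6, chunks=3, dtype="i8")
print(x.map_blocks(add_block_index, dtype="i8").compute())
# expected: array([0, 0, 0, 1, 1, 1])
```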
45,583 |
def number_of_sanic_workers() -> int:
"""Get the number of Sanic workers to use in `app.run()`.
If the environment variable constants.ENV_SANIC_WORKERS is set and is not equal to 1,
that value is used as the number of workers (provided it is at least 1); otherwise
the default is returned.
"""
def _log_and_get_default_number_of_workers():
logger.debug(
f"Using the default number of Sanic workers ({DEFAULT_SANIC_WORKERS})."
)
return DEFAULT_SANIC_WORKERS
try:
env_value = int(os.environ.get(ENV_SANIC_WORKERS, DEFAULT_SANIC_WORKERS))
except ValueError:
logger.error(
f"Cannot convert environment variable `{ENV_SANIC_WORKERS}` "
f"to int ('{os.environ[ENV_SANIC_WORKERS]}')."
)
return _log_and_get_default_number_of_workers()
if env_value == DEFAULT_SANIC_WORKERS:
return _log_and_get_default_number_of_workers()
if env_value < 1:
logger.debug(
f"Cannot set number of Sanic workers to the desired value "
f"({env_value}). The number of workers must be at least 1."
)
return _log_and_get_default_number_of_workers()
logger.debug(f"Using {env_value} Sanic workers.")
return env_value
|
def number_of_sanic_workers() -> int:
"""Get the number of Sanic workers to use in `app.run()`.
If the environment variable `constants.ENV_SANIC_WORKERS` is set and is not equal to 1,
that value is used as the number of workers (provided it is at least 1); otherwise
the default is returned.
"""
def _log_and_get_default_number_of_workers():
logger.debug(
f"Using the default number of Sanic workers ({DEFAULT_SANIC_WORKERS})."
)
return DEFAULT_SANIC_WORKERS
try:
env_value = int(os.environ.get(ENV_SANIC_WORKERS, DEFAULT_SANIC_WORKERS))
except ValueError:
logger.error(
f"Cannot convert environment variable `{ENV_SANIC_WORKERS}` "
f"to int ('{os.environ[ENV_SANIC_WORKERS]}')."
)
return _log_and_get_default_number_of_workers()
if env_value == DEFAULT_SANIC_WORKERS:
return _log_and_get_default_number_of_workers()
if env_value < 1:
logger.debug(
f"Cannot set number of Sanic workers to the desired value "
f"({env_value}). The number of workers must be at least 1."
)
return _log_and_get_default_number_of_workers()
logger.debug(f"Using {env_value} Sanic workers.")
return env_value
|
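Editor's note: a self-contained sketch of the same pattern with hypothetical names (`SANIC_WORKERS`, `workers_from_env`): read an integer from the environment and fall back to a default on missing, unparsable, or out-of-range values.

```python
# Hypothetical, simplified version of the worker-count lookup above.
import os

DEFAULT_WORKERS = 1

def workers_from_env(var_name: str = "SANIC_WORKERS") -> int:
    try:
        value = int(os.environ.get(var_name, DEFAULT_WORKERS))
    except ValueError:
        return DEFAULT_WORKERS   # unparsable value: fall back to the default
    return value if value >= 1 else DEFAULT_WORKERS

print(workers_from_env())  # 1 unless SANIC_WORKERS overrides it
```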
54,175 |
def remove(key):
return CACHE.remove(key)
|
def remove(key):
return CACHE.remove(key)
|
39,501 |
def parse_args():
parser = argparse.ArgumentParser(description='Submitit for PyTorch Distributed Benchmark', add_help=False)
parser.add_argument(
"--ngpus",
default=2,
type=int,
help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes",
default=1,
type=int,
help="Number of nodes to request"
)
parser.add_argument(
"--timeout",
default=1440,
type=int,
help="Duration of the job"
)
parser.add_argument(
"--partition",
default="train",
type=str,
help="Partition where to submit"
)
parser.add_argument(
"--job_dir",
default=os.getcwd(),
type=str,
help="A shared folder across all worker processes"
)
parser.add_argument(
"--model",
type=str,
default="torchbenchmark.e2e_models.hf_bert.Model",
help="specify the model to experiment with"
)
parser.add_argument(
"--trainer",
type=str,
default="torchbenchmark.util.distributed.ddp.DDPTrainer",
help="ddp - DistributedDataParallel"
)
return parser.parse_args()
|
def parse_args():
parser.add_argument(
"--ngpus",
default=2,
type=int,
help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes",
default=1,
type=int,
help="Number of nodes to request"
)
parser.add_argument(
"--timeout",
default=1440,
type=int,
help="Duration of the job"
)
parser.add_argument(
"--partition",
default="train",
type=str,
help="Partition where to submit"
)
parser.add_argument(
"--job_dir",
default=os.getcwd(),
type=str,
help="A shared folder across all worker processes"
)
parser.add_argument(
"--model",
type=str,
default="torchbenchmark.e2e_models.hf_bert.Model",
help="specify the model to experiment with"
)
parser.add_argument(
"--trainer",
type=str,
default="torchbenchmark.util.distributed.ddp.DDPTrainer",
help="ddp - DistributedDataParallel"
)
return parser.parse_args()
|
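Editor's note: a self-contained argparse sketch of the same pattern (hypothetical description and only a subset of the options): create the parser, register arguments with defaults and help text, then parse.

```python
# Hypothetical, trimmed-down version of the benchmark argument parser above.
import argparse
import os

def parse_args(argv=None):
    parser = argparse.ArgumentParser(description="Distributed benchmark launcher")
    parser.add_argument("--ngpus", default=2, type=int, help="GPUs to request on each node")
    parser.add_argument("--nodes", default=1, type=int, help="Number of nodes to request")
    parser.add_argument("--job_dir", default=os.getcwd(), type=str,
                        help="A shared folder across all worker processes")
    return parser.parse_args(argv)

print(parse_args(["--ngpus", "8"]).ngpus)  # -> 8
```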
57,033 |
def refresh_state_of_beam_job_run_model(
beam_job_run_model: beam_job_models.BeamJobRunModel
) -> None:
"""Refreshs the state of the given BeamJobRunModel.
Args:
beam_job_run_model: BeamJobRunModel. The model to update.
"""
job_id = beam_job_run_model.dataflow_job_id
if job_id is None:
beam_job_run_model.latest_job_state = (
beam_job_models.BeamJobState.UNKNOWN.value)
beam_job_run_model.update_timestamps(update_last_updated_time=False)
return
try:
job = dataflow.JobsV1Beta3Client().get_job(dataflow.GetJobRequest(
job_id=job_id, project_id=feconf.OPPIA_PROJECT_ID,
location=feconf.GOOGLE_APP_ENGINE_REGION))
except Exception:
job_state = beam_job_models.BeamJobState.UNKNOWN.value
job_state_updated = beam_job_run_model.last_updated
logging.exception('Failed to update job_id="%s"!' % job_id)
else:
job_state = _GCLOUD_DATAFLOW_JOB_STATE_TO_OPPIA_BEAM_JOB_STATE.get(
job.current_state, beam_job_models.BeamJobState.UNKNOWN).value
job_state_updated = job.current_state_time.replace(tzinfo=None)
if (beam_job_run_model.latest_job_state != job_state and
job_state == beam_job_models.BeamJobState.FAILED.value):
_put_job_stderr(beam_job_run_model.id, pprint.pformat(job))
beam_job_run_model.latest_job_state = job_state
beam_job_run_model.last_updated = job_state_updated
beam_job_run_model.update_timestamps(update_last_updated_time=False)
|
def refresh_state_of_beam_job_run_model(
beam_job_run_model: beam_job_models.BeamJobRunModel
) -> None:
"""Refreshs the state of the given BeamJobRunModel.
Args:
beam_job_run_model: BeamJobRunModel. The model to update.
"""
job_id = beam_job_run_model.dataflow_job_id
if job_id is None:
beam_job_run_model.latest_job_state = (
beam_job_models.BeamJobState.UNKNOWN.value)
beam_job_run_model.update_timestamps(update_last_updated_time=False)
return
try:
job = dataflow.JobsV1Beta3Client().get_job(dataflow.GetJobRequest(
job_id=job_id, project_id=feconf.OPPIA_PROJECT_ID,
location=feconf.GOOGLE_APP_ENGINE_REGION))
except Exception:
job_state = beam_job_models.BeamJobState.UNKNOWN.value
job_state_updated = beam_job_run_model.last_updated
logging.exception('Failed to update job_id="%s"!' % job_id)
else:
job_state = _GCLOUD_DATAFLOW_JOB_STATE_TO_OPPIA_BEAM_JOB_STATE.get(
job.current_state, beam_job_models.BeamJobState.UNKNOWN).value
job_state_updated = job.current_state_time.replace(tzinfo=None)
if (
beam_job_run_model.latest_job_state != job_state and
job_state == beam_job_models.BeamJobState.FAILED.value
):
_put_job_stderr(beam_job_run_model.id, pprint.pformat(job))
beam_job_run_model.latest_job_state = job_state
beam_job_run_model.last_updated = job_state_updated
beam_job_run_model.update_timestamps(update_last_updated_time=False)
|
49,934 |
def gst_iterate(gst_iterator):
"""Wrap a Gst.Iterator to expose the Python iteration protocol. The
gst-python package exposes similar functionality on Gst.Iterator itself so
this code should be retired in the future once gst-python is broadly enough
available."""
result = Gst.IteratorResult.OK
while result == Gst.IteratorResult.OK:
result, value = next(gst_iterator) # pylint:disable=stop-iteration-return
if result == Gst.IteratorResult.OK:
yield value
elif result == Gst.IteratorResult.ERROR:
raise RuntimeError("Iteration Error")
elif result == Gst.IteratorResult.RESYNC:
raise RuntimeError("Iteration Resync")
|
def gst_iterate(gst_iterator):
"""Wrap a Gst.Iterator to expose the Python iteration protocol. The
gst-python package exposes similar functionality on Gst.Iterator itself so
this code should be retired in the future once gst-python is broadly enough
available."""
result = Gst.IteratorResult.OK
while result == Gst.IteratorResult.OK:
try:
result, value = next(gst_iterator)
except StopIteration:
return
if result == Gst.IteratorResult.OK:
yield value
elif result == Gst.IteratorResult.ERROR:
raise RuntimeError("Iteration Error")
elif result == Gst.IteratorResult.RESYNC:
raise RuntimeError("Iteration Resync")
|
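Editor's note: the modification above replaces a pylint suppression with an explicit `try/except StopIteration`; under PEP 479 a `StopIteration` escaping a generator is turned into a `RuntimeError`, so it must be caught and converted into a plain `return`. A minimal, Gst-free sketch of that pattern:

```python
# Minimal sketch: catch StopIteration from next() inside a generator and end
# the generator cleanly, as required by PEP 479.
def drain(it):
    while True:
        try:
            value = next(it)
        except StopIteration:
            return  # end of iteration; do not let StopIteration escape
        yield value

print(list(drain(iter([1, 2, 3]))))  # -> [1, 2, 3]
```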
24,511 |
def create_trello_card(
client: TrelloClient,
testerSelector: TesterSelector,
teams: List[str],
pr_num: int,
pr_title: str,
pr_url: str,
pr_labels: List[str],
pr_body: str,
dry_run: bool,
pr_author: str,
config: dict,
card_assignments: dict,
pr_approvers: List[str] = None,
) -> None:
labels = ', '.join(f'`{label}`' for label in sorted(pr_labels))
body = f'''\
Pull request: {pr_url}
Author: `{pr_author}`
Labels: {labels}
{pr_body}'''
for team in teams:
tester_name, member = pick_card_member(config, pr_author, team.lower(), card_assignments, pr_approvers)
if member is None:
tester = _select_trello_tester(client, testerSelector, team, pr_author, pr_num, pr_url)
if tester:
member = tester.id
tester_name = tester.full_name
if dry_run:
echo_success(f'Will create a card for {tester_name}: ', nl=False)
echo_info(pr_title)
continue
creation_attempts = 3
for attempt in range(3):
rate_limited, error, response = client.create_card(team, pr_title, body, member)
if rate_limited:
wait_time = 10
echo_warning(
'Attempt {} of {}: A rate limit in effect, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
elif error:
if attempt + 1 == creation_attempts:
echo_failure(f'Error: {error}')
break
wait_time = 2
echo_warning(
'Attempt {} of {}: An error has occurred, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
else:
echo_success(f'Created card for team {team}: ', nl=False)
echo_info(response.json().get('url'))
break
|
def create_trello_card(
client: TrelloClient,
testerSelector: TesterSelector,
teams: List[str],
pr_num: int,
pr_title: str,
pr_url: str,
pr_labels: List[str],
pr_body: str,
dry_run: bool,
pr_author: str,
config: dict,
card_assignments: dict,
pr_approvers: Optional[List[str]] = None,
) -> None:
labels = ', '.join(f'`{label}`' for label in sorted(pr_labels))
body = f'''\
Pull request: {pr_url}
Author: `{pr_author}`
Labels: {labels}
{pr_body}'''
for team in teams:
tester_name, member = pick_card_member(config, pr_author, team.lower(), card_assignments, pr_approvers)
if member is None:
tester = _select_trello_tester(client, testerSelector, team, pr_author, pr_num, pr_url)
if tester:
member = tester.id
tester_name = tester.full_name
if dry_run:
echo_success(f'Will create a card for {tester_name}: ', nl=False)
echo_info(pr_title)
continue
creation_attempts = 3
for attempt in range(3):
rate_limited, error, response = client.create_card(team, pr_title, body, member)
if rate_limited:
wait_time = 10
echo_warning(
'Attempt {} of {}: A rate limit in effect, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
elif error:
if attempt + 1 == creation_attempts:
echo_failure(f'Error: {error}')
break
wait_time = 2
echo_warning(
'Attempt {} of {}: An error has occurred, retrying in {} '
'seconds...'.format(attempt + 1, creation_attempts, wait_time)
)
time.sleep(wait_time)
else:
echo_success(f'Created card for team {team}: ', nl=False)
echo_info(response.json().get('url'))
break
|
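Editor's note: the retry loop above distinguishes rate limits from other errors. A minimal, library-free sketch of that control flow (the `call_api` callable and wait times are hypothetical):

```python
# Hypothetical sketch of the retry loop used above.
import time

def with_retries(call_api, attempts=3, rate_limit_wait=10, error_wait=2):
    for attempt in range(attempts):
        rate_limited, error, response = call_api()
        if rate_limited:
            time.sleep(rate_limit_wait)      # back off and retry on rate limits
        elif error:
            if attempt + 1 == attempts:
                return None                  # give up after the last attempt
            time.sleep(error_wait)
        else:
            return response                  # success
    return None
```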
17,710 |
def gen4_query_aggregated_metadata(reporton: str,
ds: Dataset,
aps: List[Dict],
recursive: bool = False,
**kwargs):
"""Query metadata in a metadata store
Query paths (`aps["path"]`) have to be contained in the path of the ds.
This requirement is due to the calling conventions of the legacy
implementation.
This function doesn't cache anything, hence the caller must
make sure to only call this once per dataset to avoid waste.
Parameters
----------
reporton : {None, 'none', 'datasets', 'files', 'all'}
If `None`, reporting will be based on the `type` property of the
incoming annotated paths.
ds : Dataset
Dataset to query
aps : list
Sequence of annotated paths to query metadata for.
recursive : bool
Whether or not to report metadata underneath all query paths
recursively.
**kwargs
Any other argument will be passed on to the query result dictionary.
Returns
-------
generator
Of result dictionaries.
"""
annotated_paths = aps
dataset = ds
matching_types = {
None: None,
"files": ("file",),
"datasets": ("dataset",),
"all": ("dataset", "file")
}[reporton]
for annotated_path in annotated_paths:
relative_path = Path(annotated_path["path"]).relative_to(dataset.pathobj)
if matching_types is None:
matching_types = (annotated_path["type"],)
try:
for dump_result in Dump()(dataset=dataset.pathobj,
path=str(relative_path),
recursive=recursive,
result_renderer="disabled",
return_type="generator"):
if dump_result["status"] != "ok":
continue
metadata = dump_result["metadata"]
if metadata["type"] not in matching_types:
continue
yield {
**kwargs,
"status": "ok",
"type": metadata["type"],
"path": str(dump_result["path"]),
"dsid": metadata["dataset_id"],
"refcommit": metadata["dataset_version"],
"metadata": {
metadata["extractor_name"]: metadata["extracted_metadata"]
}
}
except NoMetadataStoreFound:
lgr.warning(f"Found no gen4-metadata in dataset {dataset.pathobj}.")
if len(matching_types) == 2:
matching_type = "all"
elif len(matching_types) == 0:
matching_type = "none"
else:
matching_type = matching_types[0]
yield {
**kwargs,
'path': str(ds.pathobj / relative_path),
'status': 'impossible',
'message': f'Dataset at {ds.pathobj} does not contain gen4 '
f'metadata',
'type': matching_type
}
return None
|
def gen4_query_aggregated_metadata(reporton: str,
ds: Dataset,
aps: List[Dict],
recursive: bool = False,
**kwargs):
"""Query metadata in a metadata store
Query paths (`aps["path"]`) have to be contained in the path of the ds.
This requirement is due to the calling conventions of the legacy
implementation.
This function doesn't cache anything, hence the caller must
make sure to only call this once per dataset to avoid waste.
Parameters
----------
reporton : {None, 'none', 'datasets', 'files', 'all'}
If `None`, reporting will be based on the `type` property of the
incoming annotated paths.
ds : Dataset
Dataset to query
aps : list
Sequence of annotated paths to query metadata for.
recursive : bool
Whether or not to report metadata underneath all query paths
recursively.
**kwargs
Any other argument will be passed on to the query result dictionary.
Returns
-------
generator
Of result dictionaries.
"""
annotated_paths = aps
dataset = ds
matching_types = {
None: None,
"files": ("file",),
"datasets": ("dataset",),
"all": ("dataset", "file")
}[reporton]
for annotated_path in annotated_paths:
relative_path = Path(annotated_path["path"]).relative_to(dataset.pathobj)
if matching_types is None:
matching_types = (annotated_path["type"],)
try:
for dump_result in Dump()(dataset=dataset.pathobj,
path=str(relative_path),
recursive=recursive,
result_renderer="disabled",
return_type="generator"):
if dump_result["status"] != "ok":
continue
metadata = dump_result["metadata"]
if metadata["type"] not in matching_types:
continue
yield {
**kwargs,
"status": "ok",
"type": metadata["type"],
"path": str(dump_result["path"]),
"dsid": metadata["dataset_id"],
"refcommit": metadata["dataset_version"],
"metadata": {
metadata["extractor_name"]: metadata["extracted_metadata"]
}
}
except NoMetadataStoreFound:
lgr.warning("Found no gen4-metadata in dataset %s.", dataset.pathobj)
if len(matching_types) == 2:
matching_type = "all"
elif len(matching_types) == 0:
matching_type = "none"
else:
matching_type = matching_types[0]
yield {
**kwargs,
'path': str(ds.pathobj / relative_path),
'status': 'impossible',
'message': f'Dataset at {ds.pathobj} does not contain gen4 '
f'metadata',
'type': matching_type
}
return None
|
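Editor's note: the main behavioural change in the row above is the warning call: the f-string is replaced with lazy `%s` formatting, so the message is only interpolated if the record is actually emitted. A minimal sketch (the path value is hypothetical):

```python
# Minimal sketch of lazy logging formatting.
import logging

lgr = logging.getLogger("demo")
dataset_path = "/tmp/some-dataset"
lgr.warning("Found no gen4-metadata in dataset %s.", dataset_path)
```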
17,628 |
def edges_aux(vertices):
'''create auxiliary edges array '''
v_len = [len(v) for v in vertices]
v_len_max = max(v_len)
np_in = np.arange(v_len_max - 1)
np_edges = np.array([np_in, np_in + 1]).T
return [np_edges]
|
def edges_aux(vertices):
'''create auxiliary edges array '''
v_len = [len(v) for v in vertices]
v_len_max = max(v_len)
np_edges = np.add.outer(np.arange(v_len_max - 1), [0, 1])
return [np_edges]
|
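Editor's note: both constructions in the row above produce the same `(n-1, 2)` array of consecutive edge indices; a minimal check:

```python
# Minimal sketch: the transpose-based and np.add.outer-based edge arrays match.
import numpy as np

n = 4
np_in = np.arange(n - 1)
edges_transpose = np.array([np_in, np_in + 1]).T
edges_outer = np.add.outer(np.arange(n - 1), [0, 1])
assert np.array_equal(edges_transpose, edges_outer)
print(edges_outer)  # [[0 1], [1 2], [2 3]]
```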
6,786 |
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
key = key.split(" as ")[0]
if key.startswith('count('): continue
if key.startswith('sum('): continue
if key.startswith('avg('): continue
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
fieldname = fieldname.strip("`")
df = frappe.get_meta(parenttype).get_field(fieldname)
label = df.label if df else fieldname.title()
if label in labels:
label = doctype + ": " + label
labels.append(label)
return labels
|
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
key = key.split(" as ")[0]
if key.startswith(('count(', 'sum(', 'avg(')): continue
if key.startswith('sum('): continue
if key.startswith('avg('): continue
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
fieldname = fieldname.strip("`")
df = frappe.get_meta(parenttype).get_field(fieldname)
label = df.label if df else fieldname.title()
if label in labels:
label = doctype + ": " + label
labels.append(label)
return labels
|
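Editor's note: the modification above collapses three prefix checks into one, since `str.startswith` accepts a tuple of prefixes. A minimal sketch (example keys are hypothetical):

```python
# Minimal sketch: a tuple argument to str.startswith matches any of the prefixes.
for key in ["count(name)", "sum(total)", "avg(score)", "customer_name"]:
    skipped = key.startswith(("count(", "sum(", "avg("))
    print(key, "-> skipped" if skipped else "-> kept")
```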
25,593 |
def configure_pfs_or_exit(
pfs_url: str,
routing_mode: RoutingMode,
service_registry: Optional[ServiceRegistry],
node_network_id: ChainID,
token_network_registry_address: TokenNetworkRegistryAddress,
pathfinding_max_fee: TokenAmount,
) -> PFSInfo:
"""
Take in the given pfs_address argument, the service registry and find out a
pfs address to use.
If pfs_url is provided we use that.
If pfs_url is 'auto' then we randomly choose a PFS address from the registry
"""
msg = "Invalid code path; configure_pfs needs routing mode PFS"
assert routing_mode == RoutingMode.PFS, msg
msg = "With PFS routing mode we shouldn't get to configure_pfs with pfs_address being None"
assert pfs_url, msg
if pfs_url == MATRIX_AUTO_SELECT_SERVER:
if service_registry is None:
raise RaidenError(
"Raiden was started with routing mode set to PFS, the pathfinding service address "
"set to 'auto' but no service registry address was given. Either specifically "
"provide a PFS address or provide a service registry address."
)
block_hash = service_registry.client.get_confirmed_blockhash()
maybe_pfs_url = get_random_pfs(
service_registry=service_registry,
block_identifier=block_hash,
pathfinding_max_fee=pathfinding_max_fee,
)
if maybe_pfs_url is None:
raise RaidenError(
"No registered Pathfinding service seems to be running"
"and basic routing is not used."
)
else:
pfs_url = maybe_pfs_url
try:
pathfinding_service_info = get_pfs_info(pfs_url)
except ServiceRequestFailed as e:
raise RaidenError(
f"There was an error with the Pathfinding Service with address "
f"{pfs_url}. Raiden will shut down. Please try a different Pathfinding Service. \n"
f"Error Message: {str(e)}"
)
if pathfinding_service_info.price > 0 and not pathfinding_service_info.payment_address:
raise RaidenError(
f"The Pathfinding Service at {pfs_url} did not provide a payment address. "
f"Raiden will shut down. Please try a different Pathfinding Service."
)
if not node_network_id == pathfinding_service_info.chain_id:
raise RaidenError(
f"Invalid reply from Pathfinding Service {pfs_url}\n"
f"Pathfinding Service is not operating on the same network "
f"({pathfinding_service_info.chain_id}) as your node is ({node_network_id}).\n"
f"Raiden will shut down. Please choose a different Pathfinding Service."
)
if pathfinding_service_info.token_network_registry_address != token_network_registry_address:
raise RaidenError(
f"Invalid reply from Pathfinding Service {pfs_url}"
f"Pathfinding Service is not operating on the same Token Network Registry "
f"({to_checksum_address(pathfinding_service_info.token_network_registry_address)})"
f" as your node ({to_checksum_address(token_network_registry_address)}).\n"
f"Raiden will shut down. Please choose a different Pathfinding Service."
)
click.secho(
f"You have chosen the Pathfinding Service at {pfs_url}.\n"
f"Operator: {pathfinding_service_info.operator}, "
f"running version: {pathfinding_service_info.version}, "
f"chain_id: {pathfinding_service_info.chain_id}.\n"
f"Fees will be paid to {to_checksum_address(pathfinding_service_info.payment_address)}. "
f"Each request costs {to_rdn(pathfinding_service_info.price)} RDN.\n"
f"Message from the Pathfinding Service:\n{pathfinding_service_info.message}"
)
log.info("Using Pathfinding Service", pfs_info=pathfinding_service_info)
return pathfinding_service_info
|
def configure_pfs_or_exit(
pfs_url: str,
routing_mode: RoutingMode,
service_registry: Optional[ServiceRegistry],
node_network_id: ChainID,
token_network_registry_address: TokenNetworkRegistryAddress,
pathfinding_max_fee: TokenAmount,
) -> PFSInfo:
"""
Take in the given pfs_address argument, the service registry and find out a
pfs address to use.
If pfs_url is provided we use that.
If pfs_url is 'auto' then we randomly choose a PFS address from the registry
"""
msg = "Invalid code path; configure_pfs needs routing mode PFS"
assert routing_mode == RoutingMode.PFS, msg
msg = "With PFS routing mode we shouldn't get to configure_pfs with pfs_address being None"
assert pfs_url, msg
if pfs_url == MATRIX_AUTO_SELECT_SERVER:
if service_registry is None:
raise RaidenError(
"Raiden was started with routing mode set to PFS, the pathfinding service address "
"set to 'auto' but no service registry address was given. Either specifically "
"provide a PFS address or provide a service registry address."
)
block_hash = service_registry.client.get_confirmed_blockhash()
maybe_pfs_url = get_random_pfs(
service_registry=service_registry,
block_identifier=block_hash,
pathfinding_max_fee=pathfinding_max_fee,
)
if maybe_pfs_url is None:
raise RaidenError(
"No registered Pathfinding Service seems to be running"
"and basic routing is not used."
)
else:
pfs_url = maybe_pfs_url
try:
pathfinding_service_info = get_pfs_info(pfs_url)
except ServiceRequestFailed as e:
raise RaidenError(
f"There was an error with the Pathfinding Service with address "
f"{pfs_url}. Raiden will shut down. Please try a different Pathfinding Service. \n"
f"Error Message: {str(e)}"
)
if pathfinding_service_info.price > 0 and not pathfinding_service_info.payment_address:
raise RaidenError(
f"The Pathfinding Service at {pfs_url} did not provide a payment address. "
f"Raiden will shut down. Please try a different Pathfinding Service."
)
if not node_network_id == pathfinding_service_info.chain_id:
raise RaidenError(
f"Invalid reply from Pathfinding Service {pfs_url}\n"
f"Pathfinding Service is not operating on the same network "
f"({pathfinding_service_info.chain_id}) as your node is ({node_network_id}).\n"
f"Raiden will shut down. Please choose a different Pathfinding Service."
)
if pathfinding_service_info.token_network_registry_address != token_network_registry_address:
raise RaidenError(
f"Invalid reply from Pathfinding Service {pfs_url}"
f"Pathfinding Service is not operating on the same Token Network Registry "
f"({to_checksum_address(pathfinding_service_info.token_network_registry_address)})"
f" as your node ({to_checksum_address(token_network_registry_address)}).\n"
f"Raiden will shut down. Please choose a different Pathfinding Service."
)
click.secho(
f"You have chosen the Pathfinding Service at {pfs_url}.\n"
f"Operator: {pathfinding_service_info.operator}, "
f"running version: {pathfinding_service_info.version}, "
f"chain_id: {pathfinding_service_info.chain_id}.\n"
f"Fees will be paid to {to_checksum_address(pathfinding_service_info.payment_address)}. "
f"Each request costs {to_rdn(pathfinding_service_info.price)} RDN.\n"
f"Message from the Pathfinding Service:\n{pathfinding_service_info.message}"
)
log.info("Using Pathfinding Service", pfs_info=pathfinding_service_info)
return pathfinding_service_info
|
16,152 |
def validate_requirements_format(integration: Integration) -> bool:
"""Validate requirements format.
Returns if an error was added.
"""
start_errors = len(integration.errors)
for req in integration.requirements:
if " " in req:
integration.add_error(
"requirements",
f'Requirement "{req}" contains a space',
)
continue
pkg, sep, version = req.partition("==")
if not sep:
integration.add_error(
"requirements",
'Requirements need to be pinned "<pkg name>==<version>".',
)
continue
if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
integration.add_error("requirements", "Unable to parse package version.")
continue
return len(integration.errors) > start_errors
|
def validate_requirements_format(integration: Integration) -> bool:
"""Validate requirements format.
Returns if an error was added.
"""
start_errors = len(integration.errors)
for req in integration.requirements:
if " " in req:
integration.add_error(
"requirements",
f'Requirement "{req}" contains a space',
)
continue
pkg, sep, version = req.partition("==")
if not sep:
integration.add_error(
"requirements",
'Requirements need to be pinned "<pkg name>==<version>".',
)
continue
if AwesomeVersion(version).strategy == AwesomeVersionStrategy.UNKNOWN:
integration.add_error("requirements", f"Unable to parse package version ({version}) for {pkg}.")
continue
return len(integration.errors) > start_errors
|
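As a standalone illustration of the pinning check exercised by both versions above, the sketch below applies the same partition-based logic to a made-up requirement list; the Integration and AwesomeVersion helpers are deliberately left out.
reqs = ["requests==2.28.1", "aiohttp>=3.8", "pyyaml == 6.0"]  # hypothetical sample input
for req in reqs:
    if " " in req:
        print(f'Requirement "{req}" contains a space')
        continue
    pkg, sep, version = req.partition("==")
    if not sep:
        print(f'"{req}" is not pinned as "<pkg name>==<version>"')
        continue
    print(f'"{pkg}" is pinned to version "{version}"')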
40,108 |
def create_symbolic_link_edges(data_graph):
edge_id = 0
for node in data_graph['nodes']:
if node['group'] == 'inode/symlink':
link_to = node['full_file_type'].split(' ')[3].split('\'')[1]
for match in data_graph['nodes']:
if match['label'] == link_to:
edge = {'source': node['id'], 'target': match['id'], 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return data_graph, edge_id
|
def create_symbolic_link_edges(data_graph):
edge_id = 0
for node in data_graph['nodes']:
if node['group'] == 'inode/symlink':
link_to = node['full_file_type'].split('\'')[1]
for match in data_graph['nodes']:
if match['label'] == link_to:
edge = {'source': node['id'], 'target': match['id'], 'id': edge_id}
data_graph['edges'].append(edge)
edge_id += 1
return data_graph, edge_id
|
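To make the parsing change above concrete, here is a standalone comparison of the two slicing strategies on a hypothetical full_file_type string; the only assumption is that it follows the usual "symbolic link to 'target'" pattern.
full_file_type = "symbolic link to '/usr/bin/python3'"  # hypothetical sample value
old_way = full_file_type.split(' ')[3].split('\'')[1]
new_way = full_file_type.split('\'')[1]
print(old_way)  # /usr/bin/python3
print(new_way)  # /usr/bin/python3
The simplified split also keeps working when the descriptive prefix is not exactly three words long, which is where the positional split(' ')[3] would fail.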
44,142 |
def observable(fermion_ops, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the Fermion many-body observable whose expectation value can be
measured in PennyLane.
The second-quantized operator of the Fermion many-body system can combine one-particle
and two-particle operators as in the case of electronic Hamiltonians :math:`\hat{H}`:
.. math::
\hat{H} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} +
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations, this is the case for: the kinetic energy operator,
the nuclei Coulomb potential, or any other external fields included in the Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
- The observable is built by adding the operators
:math:`\sum_{\alpha, \beta} t_{\alpha\beta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta` and
:math:`\frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
v_{\alpha\beta\gamma\delta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger \hat{c}_\gamma \hat{c}_\delta`.
- Second-quantized operators contributing to the
many-body observable must be represented using the `FermionOperator
<https://github.com/quantumlib/OpenFermion/blob/master/docs/
tutorials/intro_to_openfermion.ipynb>`_ data structure as implemented in OpenFermion.
See the functions :func:`~.one_particle` and :func:`~.two_particle` to build the
FermionOperator representations of one-particle and two-particle operators.
- The function uses tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to map the resulting fermionic Hamiltonian to the basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is converted
to a PennyLane observable by the function :func:`~.convert_observable`.
Args:
fermion_ops (list[FermionOperator]): list containing the FermionOperator data structures
representing the one-particle and/or two-particle operators entering the many-body
observable
init_term (float): Any quantity required to initialize the many-body observable. For
example, this can be used to pass the nuclear-nuclear repulsion energy :math:`V_{nn}`
which is typically included in the electronic Hamiltonian of molecules.
mapping (str): Specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", 0.25)
>>> v = FermionOperator("1^ 0^ 0 1", -0.15) + FermionOperator("2^ 0^ 2 0", 0.3)
>>> print(observable([t, v], mapping="jordan_wigner"))
(0.2625) [I0]
+ (-0.1375) [Z0]
+ (-0.0875) [Z1]
+ (-0.0375) [Z0 Z1]
+ (0.075) [Z2]
+ (-0.075) [Z0 Z2]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
f"The '{mapping}' transformation is not available. \n "
f"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'."
)
# Initialize the FermionOperator
mb_obs = openfermion.ops.FermionOperator("") * init_term
for ops in fermion_ops:
if not isinstance(ops, openfermion.ops.FermionOperator):
raise TypeError(
f"Elements in the lists are expected to be of type 'FermionOperator'; got {type(ops)}"
)
mb_obs += ops
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return qml.qchem.convert.import_operator(
openfermion.transforms.bravyi_kitaev(mb_obs), wires=wires
)
return qml.qchem.convert.import_operator(
openfermion.transforms.jordan_wigner(mb_obs), wires=wires
)
|
def observable(fermion_ops, init_term=0, mapping="jordan_wigner", wires=None):
r"""Builds the Fermion many-body observable whose expectation value can be
measured in PennyLane.
The second-quantized operator of the fermionic many-body system can combine one-particle
and two-particle operators as in the case of electronic Hamiltonians :math:`\hat{H}`:
.. math::
\hat{H} = \sum_{\alpha, \beta} \langle \alpha \vert \hat{t}^{(1)} +
\cdots + \hat{t}^{(n)} \vert \beta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta
+ \frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
\langle \alpha, \beta \vert \hat{v}^{(1)} + \cdots + \hat{v}^{(n)}
\vert \gamma, \delta \rangle ~ \hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger
\hat{c}_\gamma \hat{c}_\delta
In the latter equations the indices :math:`\alpha, \beta, \gamma, \delta` run over the
basis of single-particle states. The operators :math:`\hat{c}^\dagger` and :math:`\hat{c}`
are the particle creation and annihilation operators, respectively.
:math:`\langle \alpha \vert \hat{t} \vert \beta \rangle` denotes the matrix element of
the single-particle operator :math:`\hat{t}` entering the observable. For example,
in electronic structure calculations, this is the case for: the kinetic energy operator,
the nuclei Coulomb potential, or any other external fields included in the Hamiltonian.
On the other hand, :math:`\langle \alpha, \beta \vert \hat{v} \vert \gamma, \delta \rangle`
denotes the matrix element of the two-particle operator :math:`\hat{v}`, for example, the
Coulomb interaction between the electrons.
- The observable is built by adding the operators
:math:`\sum_{\alpha, \beta} t_{\alpha\beta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta` and
:math:`\frac{1}{2} \sum_{\alpha, \beta, \gamma, \delta}
v_{\alpha\beta\gamma\delta}^{(i)}
\hat{c}_\alpha^\dagger \hat{c}_\beta^\dagger \hat{c}_\gamma \hat{c}_\delta`.
- Second-quantized operators contributing to the
many-body observable must be represented using the `FermionOperator
<https://github.com/quantumlib/OpenFermion/blob/master/docs/
tutorials/intro_to_openfermion.ipynb>`_ data structure as implemented in OpenFermion.
See the functions :func:`~.one_particle` and :func:`~.two_particle` to build the
FermionOperator representations of one-particle and two-particle operators.
- The function uses tools of `OpenFermion <https://github.com/quantumlib/OpenFermion>`_
to map the resulting fermionic Hamiltonian to the basis of Pauli matrices via the
Jordan-Wigner or Bravyi-Kitaev transformation. Finally, the qubit operator is converted
to a PennyLane observable by the function :func:`~.convert_observable`.
Args:
fermion_ops (list[FermionOperator]): list containing the FermionOperator data structures
representing the one-particle and/or two-particle operators entering the many-body
observable
init_term (float): Any quantity required to initialize the many-body observable. For
example, this can be used to pass the nuclear-nuclear repulsion energy :math:`V_{nn}`
which is typically included in the electronic Hamiltonian of molecules.
mapping (str): Specifies the fermion-to-qubit mapping. Input values can
be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
pennylane.Hamiltonian: the fermionic-to-qubit transformed observable
**Example**
>>> t = FermionOperator("0^ 0", 0.5) + FermionOperator("1^ 1", 0.25)
>>> v = FermionOperator("1^ 0^ 0 1", -0.15) + FermionOperator("2^ 0^ 2 0", 0.3)
>>> print(observable([t, v], mapping="jordan_wigner"))
(0.2625) [I0]
+ (-0.1375) [Z0]
+ (-0.0875) [Z1]
+ (-0.0375) [Z0 Z1]
+ (0.075) [Z2]
+ (-0.075) [Z0 Z2]
"""
if mapping.strip().lower() not in ("jordan_wigner", "bravyi_kitaev"):
raise TypeError(
f"The '{mapping}' transformation is not available. \n "
f"Please set 'mapping' to 'jordan_wigner' or 'bravyi_kitaev'."
)
# Initialize the FermionOperator
mb_obs = openfermion.ops.FermionOperator("") * init_term
for ops in fermion_ops:
if not isinstance(ops, openfermion.ops.FermionOperator):
raise TypeError(
f"Elements in the lists are expected to be of type 'FermionOperator'; got {type(ops)}"
)
mb_obs += ops
# Map the fermionic operator to a qubit operator
if mapping.strip().lower() == "bravyi_kitaev":
return qml.qchem.convert.import_operator(
openfermion.transforms.bravyi_kitaev(mb_obs), wires=wires
)
return qml.qchem.convert.import_operator(
openfermion.transforms.jordan_wigner(mb_obs), wires=wires
)
|
58,518 |
def collective_to_envs(collective, envs):
"""A helper method that get information from collective and add to envs.
Args:
collective(dict): collective information
envs(dict): os environment dict
Returns:
envs(dict): modified os environment dict
"""
if envs is not None:
        assert all(key not in envs for key in [
            "collective_group_name", "collective_rank",
            "collective_world_size", "collective_backend"
        ])
else:
envs = {}
envs["collective_group_name"] = str(collective["group_name"])
envs["collective_rank"] = str(collective["rank"])
envs["collective_world_size"] = str(collective["world_size"])
envs["collective_backend"] = str(collective["backend"])
return envs
|
def collective_to_envs(collective, envs):
"""A helper method that get information from collective and add to envs.
Args:
collective (dict): collective information
envs (dict): os environment dict
Returns:
envs (dict): modified os environment dict
"""
if envs is not None:
        assert all(key not in envs for key in [
            "collective_group_name", "collective_rank",
            "collective_world_size", "collective_backend"
        ])
else:
envs = {}
envs["collective_group_name"] = str(collective["group_name"])
envs["collective_rank"] = str(collective["rank"])
envs["collective_world_size"] = str(collective["world_size"])
envs["collective_backend"] = str(collective["backend"])
return envs
|
45,934 |
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor,
new_K: Optional[torch.Tensor] = None, iters: int = 5) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
new_K: New intrinsic camera matrix with shape :math:`(*, 3, 3)`. Default: None, in this case K is used.
iters: Number of undistortion iterations. Default: 5.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 and points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if new_K is None:
new_K = K
elif new_K.shape[-2:] != (3, 3):
raise ValueError(f'new_K matrix shape is invalid. Got {new_K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # principal point in y (Bx1)
    fx: torch.Tensor = K[..., 0:1, 0]  # focal in x (Bx1)
    fy: torch.Tensor = K[..., 1:2, 1]  # focal in y (Bx1)
    new_cx: torch.Tensor = new_K[..., 0:1, 2]  # principal point in x (Bx1)
    new_cy: torch.Tensor = new_K[..., 1:2, 2]  # principal point in y (Bx1)
new_fx: torch.Tensor = new_K[..., 0:1, 0] # focal in x (Bx1)
new_fy: torch.Tensor = new_K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
x0, y0 = x, y
for _ in range(iters):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = new_fx * x + new_cx
y = new_fy * y + new_cy
return torch.stack([x, y], -1)
|
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor,
new_K: Optional[torch.Tensor] = None, iters: int = 5) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
    Radial :math:`(k_1, k_2, k_3, k_4, k_5, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
new_K: New intrinsic camera matrix with shape :math:`(*, 3, 3)`. Default: None, in this case K is used.
num_iters: Number of undistortion iterations. Default: 5.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 and points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if new_K is None:
new_K = K
elif new_K.shape[-2:] != (3, 3):
raise ValueError(f'new_K matrix shape is invalid. Got {new_K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
    cx: torch.Tensor = K[..., 0:1, 2]  # principal point in x (Bx1)
    cy: torch.Tensor = K[..., 1:2, 2]  # principal point in y (Bx1)
    fx: torch.Tensor = K[..., 0:1, 0]  # focal in x (Bx1)
    fy: torch.Tensor = K[..., 1:2, 1]  # focal in y (Bx1)
    new_cx: torch.Tensor = new_K[..., 0:1, 2]  # principal point in x (Bx1)
    new_cy: torch.Tensor = new_K[..., 1:2, 2]  # principal point in y (Bx1)
new_fx: torch.Tensor = new_K[..., 0:1, 0] # focal in x (Bx1)
new_fy: torch.Tensor = new_K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
x0, y0 = x, y
for _ in range(iters):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = new_fx * x + new_cx
y = new_fy * y + new_cy
return torch.stack([x, y], -1)
|
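A small torch-only sketch of the coefficient handling above: distortion vectors shorter than 14 elements are zero-padded on the right, which leaves the tilt coefficients at zero so the tilt branch is skipped. The 4-element vector is a made-up example.
import torch

dist = torch.tensor([[0.1, 0.01, 0.001, 0.002]])  # hypothetical (k1, k2, p1, p2) vector
if dist.shape[-1] < 14:
    dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
print(dist.shape)        # torch.Size([1, 14])
print(dist[..., 12:14])  # tensor([[0., 0.]]) -> tau_x, tau_y stay zero, no tilt step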
8,684 |
def handle_init(options):
"""Use config's wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config's wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg file only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
|
def handle_init(options):
"""Use config's wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config's wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg files only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
|
59,234 |
def _is_syntax_error(err1, err2):
if all("was never closed" in repr(err) for err in (err1, err2)):
return False
if repr(err1) == repr(err2):
return True
return False
|
def _is_syntax_error(err1, err2):
rep1 = repr(err1)
rep2 = repr(err2)
if "was never closed" in rep1 and"was never closed" in rep2:
return False
if rep1 == rep2:
return True
return False
|
57,207 |
def get_exploration_version_valid_info(exploration_id, revert_to_version):
"""Tests whether an exploration can be reverted to the given version
number. Does not commit any changes.
Args:
exploration_id: str. The id of the exploration to be reverted to the
current version.
revert_to_version: int. The version to which the given exploration
is to be reverted.
Returns:
dict{'valid': bool, 'details': Optional[str]}. If the revert_to_version
passes all backend validation checks, then 'details' is None.
Otherwise, 'details' stores the validation error as a string.
"""
# Validate the previous version of the exploration.
exploration = exp_fetchers.get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
try:
if exploration_rights.status != rights_domain.ACTIVITY_STATUS_PRIVATE:
exploration.validate(strict=True)
else:
exploration.validate()
except Exception as ex:
return {'valid': False, 'details': str(ex)}
return {'valid': True, 'details': None}
|
def get_exploration_version_valid_info(exploration_id, revert_to_version):
"""Tests whether an exploration can be reverted to the given version
number. Does not commit any changes.
Args:
exploration_id: str. The id of the exploration to be reverted to the
current version.
revert_to_version: int. The version to which the given exploration
is to be reverted.
Returns:
dict{'valid': bool, 'details': Optional[str]}. If the revert_to_version
passes all backend validation checks, then 'details' is None.
Otherwise, 'details' stores the validation error as a string.
"""
# Validate the previous version of the exploration.
exploration = exp_fetchers.get_exploration_by_id(
exploration_id, version=revert_to_version)
exploration_rights = rights_manager.get_exploration_rights(exploration.id)
try:
exploration.validate(
strict=exploration_rights.status == rights_domain.ACTIVITY_STATUS_PUBLIC)
except Exception as ex:
return {'valid': False, 'details': str(ex)}
return {'valid': True, 'details': None}
|
48,299 |
def run_module():
# available arguments/parameters that a user can pass
module_args = dict(
state=dict(type='str', default='present', choices=['present', 'absent', 'opened', 'closed']),
device=dict(type='str'),
name=dict(type='str'),
keyfile=dict(type='path'),
new_keyfile=dict(type='path'),
remove_keyfile=dict(type='path'),
force_remove_last_key=dict(type='bool', default=False),
)
# seed the result dict in the object
result = dict(
changed=False,
name=None
)
module = AnsibleModule(argument_spec=module_args,
supports_check_mode=True)
crypt = CryptHandler(module)
conditions = ConditionsHandler(module, crypt)
# The conditions are in order to allow more operations in one run.
# (e.g. create luks and add a key to it)
# luks create
if conditions.luks_create():
if not module.check_mode:
try:
crypt.run_luks_create(module.params['device'],
module.params['keyfile'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks open
name = conditions.opened_luks_name()
if name is not None:
result['name'] = name
if conditions.luks_open():
name = module.params['name']
if name is None:
try:
name = crypt.generate_luks_name(module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
if not module.check_mode:
try:
crypt.run_luks_open(module.params['device'],
module.params['keyfile'],
name)
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['name'] = name
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks close
if conditions.luks_close():
if module.params['device'] is not None:
try:
name = crypt.get_container_name_by_device(
module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
else:
name = module.params['name']
if not module.check_mode:
try:
crypt.run_luks_close(name)
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks add key
if conditions.luks_add_key():
if not module.check_mode:
try:
crypt.run_luks_add_key(module.params['device'],
module.params['keyfile'],
module.params['new_keyfile'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks remove key
if conditions.luks_remove_key():
if not module.check_mode:
try:
crypt.run_luks_remove_key(module.params['device'],
module.params['remove_keyfile'],
force_remove_last_key=module.params['force_remove_last_key'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks remove
if conditions.luks_remove():
if not module.check_mode:
try:
crypt.run_luks_remove(module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# Success - return result
module.exit_json(**result)
|
def run_module():
# available arguments/parameters that a user can pass
module_args = dict(
state=dict(type='str', default='present', choices=['present', 'absent', 'opened', 'closed']),
device=dict(type='str'),
name=dict(type='str'),
keyfile=dict(type='path'),
new_keyfile=dict(type='path'),
remove_keyfile=dict(type='path'),
force_remove_last_key=dict(type='bool', default=False),
)
# seed the result dict in the object
result = dict(
changed=False,
name=None
)
module = AnsibleModule(argument_spec=module_args,
supports_check_mode=True)
crypt = CryptHandler(module)
conditions = ConditionsHandler(module, crypt)
# The conditions are in order to allow more operations in one run.
# (e.g. create luks and add a key to it)
# luks create
if conditions.luks_create():
if not module.check_mode:
try:
crypt.run_luks_create(module.params['device'],
module.params['keyfile'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
if not exists:
result['changed'] = True
else:
result['changed'] = False
if module.check_mode:
module.exit_json(**result)
# luks open
name = conditions.opened_luks_name()
if name is not None:
result['name'] = name
if conditions.luks_open():
name = module.params['name']
if name is None:
try:
name = crypt.generate_luks_name(module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
if not module.check_mode:
try:
crypt.run_luks_open(module.params['device'],
module.params['keyfile'],
name)
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['name'] = name
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks close
if conditions.luks_close():
if module.params['device'] is not None:
try:
name = crypt.get_container_name_by_device(
module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
else:
name = module.params['name']
if not module.check_mode:
try:
crypt.run_luks_close(name)
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks add key
if conditions.luks_add_key():
if not module.check_mode:
try:
crypt.run_luks_add_key(module.params['device'],
module.params['keyfile'],
module.params['new_keyfile'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks remove key
if conditions.luks_remove_key():
if not module.check_mode:
try:
crypt.run_luks_remove_key(module.params['device'],
module.params['remove_keyfile'],
force_remove_last_key=module.params['force_remove_last_key'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# luks remove
if conditions.luks_remove():
if not module.check_mode:
try:
crypt.run_luks_remove(module.params['device'])
except ValueError as e:
module.fail_json(msg="luks_device error: %s" % e)
result['changed'] = True
if module.check_mode:
module.exit_json(**result)
# Success - return result
module.exit_json(**result)
|
3,023 |
def interpolate_1d_fill(
values,
method="pad",
axis=0,
limit=None,
limit_area=None,
fill_value=None,
dtype=None,
):
"""
    This is a 1D-version of `interpolate_2d`, which is used for methods `pad`
and `backfill` when interpolating. This 1D-version is necessary to be
able to handle kwarg `limit_area` via the function
` _derive_indices_of_nans_to_preserve`. It is used the same way as the
1D-interpolation functions which are based on scipy-interpolation, i.e.
via np.apply_along_axis.
"""
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
def interpolate_1d_fill(
values,
method="pad",
axis=0,
limit=None,
limit_area=None,
fill_value=None,
dtype: Optional[Dtype] = None,
):
"""
    This is a 1D-version of `interpolate_2d`, which is used for methods `pad`
and `backfill` when interpolating. This 1D-version is necessary to be
able to handle kwarg `limit_area` via the function
` _derive_indices_of_nans_to_preserve`. It is used the same way as the
1D-interpolation functions which are based on scipy-interpolation, i.e.
via np.apply_along_axis.
"""
if method == "pad":
limit_direction = "forward"
elif method == "backfill":
limit_direction = "backward"
else:
raise ValueError("`method` must be either 'pad' or 'backfill'.")
orig_values = values
yvalues = values
invalid = isna(yvalues)
valid = ~invalid
if values.ndim > 1:
raise AssertionError("This only works with 1D data.")
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(values, fill_value)
preserve_nans = _derive_indices_of_nans_to_preserve(
yvalues=yvalues,
valid=valid,
invalid=invalid,
limit=limit,
limit_area=limit_area,
limit_direction=limit_direction,
)
method = clean_fill_method(method)
if method == "pad":
values = pad_1d(values, limit=limit, mask=mask, dtype=dtype)
else:
values = backfill_1d(values, limit=limit, mask=mask, dtype=dtype)
if orig_values.dtype.kind == "M":
# convert float back to datetime64
values = values.astype(orig_values.dtype)
values[preserve_nans] = fill_value
return values
|
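For intuition about what method="pad" means here, the numpy-only sketch below forward-fills a 1D array with an optional limit; it is purely illustrative and does not reproduce the pandas masking, dtype, or limit_area handling.
import numpy as np

def simple_pad_1d(values, limit=None):
    # Forward-fill: propagate the last valid value into NaN gaps, optionally
    # stopping after `limit` consecutive filled positions.
    values = np.asarray(values, dtype=float).copy()
    last_valid = np.nan
    run = 0
    for i, v in enumerate(values):
        if np.isnan(v):
            run += 1
            if not np.isnan(last_valid) and (limit is None or run <= limit):
                values[i] = last_valid
        else:
            last_valid = v
            run = 0
    return values

print(simple_pad_1d([1.0, np.nan, np.nan, 4.0, np.nan], limit=1))  # -> [1., 1., nan, 4., 4.]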
4,225 |
def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1):
"""Detect segments with muscle artifacts.
    Detects periods that contain high-frequency activity beyond the
specified threshold. Muscle artifacts are most notable in the range of 110-
140Hz.
Raw data is band pass filtered between 110 and 140 Hz, the signal envelope
computed, z-scored across samples, channel averaged and low-pass
filtered to smooth transient peaks.
Parameters
----------
raw : instance of Raw
Data to compute head position.
threshold : float
        The threshold for selecting segments with muscle activity artifacts.
picks : array
Channels to use for artifact detection.
min_length_good : int | float | None
The minimal good segment length between annotations, smaller segments
will be included in the movement annotation.
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts.
scores_muscle : array
Z-score values averaged accros channels for each sample.
"""
raw_copy = raw.copy()
raw_copy.pick(picks)
raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case
# Only one type of channel, otherwise z-score will be biased
assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \
'types, pick one type'
raw_copy.filter(110, 140, fir_design='firwin')
raw_copy.apply_hilbert(envelope=True)
sfreq = raw_copy.info['sfreq']
art_scores = zscore(raw_copy._data, axis=1)
scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4)
art_mask = scores_muscle > threshold
# remove artifact free periods shorter than min_length_good
idx_min = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for l in range(1, num_comps + 1):
l_idx = np.nonzero(comps == l)[0]
if len(l_idx) < idx_min:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle')
return annot, scores_muscle
|
def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1):
"""Detect segments with muscle artifacts.
    Detects periods that contain high-frequency activity beyond the
specified threshold. Muscle artifacts are most notable in the range of 110-
140Hz.
Raw data is band pass filtered between 110 and 140 Hz, the signal envelope
computed, z-scored across samples, channel averaged and low-pass
filtered to smooth transient peaks.
Parameters
----------
raw : instance of Raw
Data to compute head position.
threshold : float
        The threshold for selecting segments with muscle activity artifacts.
picks : array
Channels to use for artifact detection.
min_length_good : int | float | None
The minimal good segment length between annotations, smaller segments
will be included in the movement annotation.
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts.
scores_muscle : array
Z-score values averaged across channels for each sample.
"""
raw_copy = raw.copy()
raw_copy.pick(picks)
raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case
# Only one type of channel, otherwise z-score will be biased
assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \
'types, pick one type'
raw_copy.filter(110, 140, fir_design='firwin')
raw_copy.apply_hilbert(envelope=True)
sfreq = raw_copy.info['sfreq']
art_scores = zscore(raw_copy._data, axis=1)
scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4)
art_mask = scores_muscle > threshold
# remove artifact free periods shorter than min_length_good
idx_min = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for l in range(1, num_comps + 1):
l_idx = np.nonzero(comps == l)[0]
if len(l_idx) < idx_min:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle')
return annot, scores_muscle
|
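The short-gap absorption step (artifact-free runs shorter than min_length_good being merged into the surrounding annotation) can be exercised in isolation with a toy mask; the mask and the 3-sample minimum below are made up.
import numpy as np
from scipy.ndimage import label

art_mask = np.array([1, 0, 0, 1, 0, 0, 0, 0, 1], dtype=bool)  # hypothetical artifact mask
idx_min = 3  # artifact-free runs shorter than this are absorbed

comps, num_comps = label(art_mask == 0)
for comp in range(1, num_comps + 1):
    comp_idx = np.nonzero(comps == comp)[0]
    if len(comp_idx) < idx_min:
        art_mask[comp_idx] = True
print(art_mask)  # -> [True, True, True, True, False, False, False, False, True]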
50,080 |
def _eigs_csr(data, isherm, vecs, eigvals, num_large, num_small, tol, maxiter):
"""
Internal functions for computing eigenvalues and eigenstates for a sparse
matrix.
"""
N = data.shape[0]
big_vals = np.array([])
small_vals = np.array([])
evecs = None
remove_one = False
if eigvals == (N - 1):
# calculate all eigenvalues and remove one at output if using sparse
# 1: remove the smallest, -1, remove the largest
remove_one = bool(num_small) or -1
eigvals = 0
num_small = num_large = N // 2
num_small += N % 2
if vecs:
if isherm:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigsh(data, k=num_large,
which='LA', tol=tol,
maxiter=maxiter)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigsh(
data, k=num_small, which='SA',
tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigs(data, k=num_large,
which='LR', tol=tol,
maxiter=maxiter)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigs(
data, k=num_small, which='SR',
tol=tol, maxiter=maxiter)
if num_large != 0 and num_small != 0:
evecs = np.hstack([small_vecs, big_vecs])
elif num_large != 0 and num_small == 0:
evecs = big_vecs
elif num_large == 0 and num_small != 0:
evecs = small_vecs
else:
if isherm:
if num_large > 0:
big_vals = sp.linalg.eigsh(
data, k=num_large, which='LA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigsh(
data, k=num_small, which='SA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals = sp.linalg.eigs(
data, k=num_large, which='LR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigs(
data, k=num_small, which='SR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
evals = np.hstack((small_vals, big_vals))
if isherm:
evals = np.real(evals)
_zipped = list(zip(evals, range(len(evals))))
_zipped.sort()
evals, perm = list(zip(*_zipped))
if vecs:
evecs = np.array([evecs[:, k] for k in perm]).T
# remove last element if requesting N-1 eigs and using sparse
if remove_one == 1:
evals = evals[:-1]
if vecs:
evecs = evecs[:, :-1]
elif remove_one == -1:
evals = evals[1:]
if vecs:
evecs = evecs[:, 1:]
return np.array(evals), evecs
|
def _eigs_csr(data, isherm, vecs, eigvals, num_large, num_small, tol, maxiter):
"""
Internal functions for computing eigenvalues and eigenstates for a sparse
matrix.
"""
N = data.shape[0]
big_vals = np.array([])
small_vals = np.array([])
evecs = None
remove_one = 0 # 0: remove none, 1: remove smallest, -1: remove largest
if eigvals == (N - 1):
# calculate all eigenvalues and remove one at output if using sparse
# 1: remove the smallest, -1, remove the largest
remove_one = 1 if (num_small > 0) else -1
eigvals = 0
num_small = num_large = N // 2
num_small += N % 2
if vecs:
if isherm:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigsh(data, k=num_large,
which='LA', tol=tol,
maxiter=maxiter)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigsh(
data, k=num_small, which='SA',
tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals, big_vecs = sp.linalg.eigs(data, k=num_large,
which='LR', tol=tol,
maxiter=maxiter)
if num_small > 0:
small_vals, small_vecs = sp.linalg.eigs(
data, k=num_small, which='SR',
tol=tol, maxiter=maxiter)
if num_large != 0 and num_small != 0:
evecs = np.hstack([small_vecs, big_vecs])
elif num_large != 0 and num_small == 0:
evecs = big_vecs
elif num_large == 0 and num_small != 0:
evecs = small_vecs
else:
if isherm:
if num_large > 0:
big_vals = sp.linalg.eigsh(
data, k=num_large, which='LA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigsh(
data, k=num_small, which='SA',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
else:
if num_large > 0:
big_vals = sp.linalg.eigs(
data, k=num_large, which='LR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
if num_small > 0:
small_vals = sp.linalg.eigs(
data, k=num_small, which='SR',
return_eigenvectors=False, tol=tol, maxiter=maxiter)
evals = np.hstack((small_vals, big_vals))
if isherm:
evals = np.real(evals)
_zipped = list(zip(evals, range(len(evals))))
_zipped.sort()
evals, perm = list(zip(*_zipped))
if vecs:
evecs = np.array([evecs[:, k] for k in perm]).T
# remove last element if requesting N-1 eigs and using sparse
if remove_one == 1:
evals = evals[:-1]
if vecs:
evecs = evecs[:, :-1]
elif remove_one == -1:
evals = evals[1:]
if vecs:
evecs = evecs[:, 1:]
return np.array(evals), evecs
|
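A compact sketch of the split-spectrum idea used above: the largest and smallest ends of a Hermitian spectrum are computed separately with ARPACK and then merged and sorted. The 6x6 diagonal test matrix and the 2/2 split are arbitrary illustration choices.
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as sp_linalg

data = sparse.csr_matrix(np.diag(np.arange(1.0, 7.0)))  # toy Hermitian matrix
num_large = num_small = 2

big_vals = sp_linalg.eigsh(data, k=num_large, which='LA', return_eigenvectors=False)
small_vals = sp_linalg.eigsh(data, k=num_small, which='SA', return_eigenvectors=False)
evals = np.sort(np.hstack((small_vals, big_vals)))
print(evals)  # approximately [1. 2. 5. 6.]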
55,548 |
def test_time_ops():
# Make a pandas.core.indexes.timedeltas.TimedeltaIndex
deltas = pd.to_timedelta([1], unit="h")
modin_series = pd.Series(np.datetime64("2000-12-12")) + deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) + deltas
df_equals(modin_series, pandas_series)
modin_series = pd.Series(np.datetime64("2000-12-12")) - deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) - deltas
df_equals(modin_series, pandas_series)
|
def test_time_ops():
# Make a pandas.core.indexes.timedeltas.TimedeltaIndex
deltas = pd.to_timedelta([1], unit="h")
test_series = create_test_series(np.datetime64("2000-12-12"))
eval_general(*test_series, lambda s: s + deltas)
modin_series = pd.Series(np.datetime64("2000-12-12")) - deltas
pandas_series = pandas.Series(np.datetime64("2000-12-12")) - deltas
df_equals(modin_series, pandas_series)
|
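The same arithmetic in plain pandas, outside the Modin helpers, for reference (here pd is ordinary pandas, unlike in the test above):
import numpy as np
import pandas as pd

deltas = pd.to_timedelta([1], unit="h")
series = pd.Series(np.datetime64("2000-12-12"))
print(series + deltas)  # 2000-12-12 01:00:00
print(series - deltas)  # 2000-12-11 23:00:00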
43,305 |
def test_graph_constructor_nodes_from_edges():
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4, 5], "target": [0, 2]}, index=[1, 2]),
}
g = StellarGraph(nodes=None, edges=edges)
assert sorted(g.nodes()) == [0, 1, 2, 4, 5]
|
def test_graph_constructor_nodes_from_edges():
edges = {
"a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
"b": pd.DataFrame({"source": [4, 5], "target": [0, 2]}, index=[1, 2]),
}
g = StellarGraph(edges=edges)
assert sorted(g.nodes()) == [0, 1, 2, 4, 5]
|
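A pandas-only sketch of how the node set can be inferred from the edge frames alone, matching the IDs asserted in the test (no StellarGraph import):
import pandas as pd

edges = {
    "a": pd.DataFrame({"source": [1], "target": [0]}, index=[0]),
    "b": pd.DataFrame({"source": [4, 5], "target": [0, 2]}, index=[1, 2]),
}
node_ids = sorted(set(
    pd.concat([df[["source", "target"]] for df in edges.values()]).to_numpy().ravel()
))
print(node_ids)  # [0, 1, 2, 4, 5]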
31,933 |
def fetch_notables(service, cache_object=None, enrich_notables=False):
last_run_data = demisto.getLastRun()
if not last_run_data:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease first run')
last_run_time = last_run_data and 'time' in last_run_data and last_run_data['time']
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run is:\n {}'.format(last_run_data))
dem_params = demisto.params()
occurred_look_behind = int(dem_params.get('occurrence_look_behind', 15) or 15)
extensive_log('[SplunkPyPreRelease] occurrence look behind is: {}'.format(occurred_look_behind))
occured_start_time, now = get_fetch_start_times(dem_params, service, last_run_time, occurred_look_behind)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run time: {}, now: {}'.format(last_run_time, now))
default_batch_size = int(dem_params.get('batch_size', 200))
batch_size = last_run_data.get('batch_size') or default_batch_size
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease batch size is : {}'.format(batch_size))
kwargs_oneshot = build_fetch_kwargs(dem_params, batch_size, occured_start_time, now)
fetch_query = build_fetch_query(dem_params)
oneshotsearch_results = service.jobs.oneshot(fetch_query, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
last_run_fetched_ids = last_run_data.get('found_incidents_ids', {})
incidents = [] # type: List[Dict]
notables = []
incident_ids_to_add = {}
for item in reader:
if len(incidents) >= FETCH_LIMIT:
break
extensive_log('[SplunkPyPreRelease] Incident data before parsing to notable: {}'.format(item))
notable_incident = Notable(data=item)
inc = notable_incident.to_incident()
extensive_log('[SplunkPyPreRelease] Incident data after parsing to notable: {}'.format(inc))
incident_id = create_incident_custom_id(inc)
if incident_id not in last_run_fetched_ids:
incident_ids_to_add[incident_id] = splunk_time_to_datetime(inc["occurred"]).strftime(SPLUNK_TIME_FORMAT)
# Save the occurrence time of each event in datetime format
incidents.append(inc)
notables.append(notable_incident)
else:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Dropped incident {} due to duplication.'.format(incident_id))
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids before adding new IDs: {}'.format(len(last_run_fetched_ids)))
for incident_id in incident_ids_to_add:
last_run_fetched_ids[incident_id] = incident_ids_to_add[incident_id]
# Adding the new incidents with the occurrence time.
extensive_log(
'[SplunkPyPreRelease] Size of last_run_fetched_ids after adding new IDs: {}'.format(len(last_run_fetched_ids)))
last_run_fetched_ids = remove_old_incident_ids(last_run_fetched_ids, occured_start_time)
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids after '
'removing old IDs: {}'.format(len(last_run_fetched_ids)))
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - incidents fetched on last run = {}'.format(last_run_fetched_ids))
debug_message = 'SplunkPyPreRelease - total number of incidents found: from {}\n to {}\n with the ' \
'query: {} is: {}.'.format(last_run_time, now, fetch_query, len(incidents))
extensive_log(debug_message)
if not enrich_notables:
demisto.incidents(incidents)
else:
cache_object.not_yet_submitted_notables += notables
if DUMMY not in last_run_data:
# we add dummy data to the last run to differentiate between the fetch-incidents triggered to the
# fetch-incidents running as part of "Pull from instance" in Classification & Mapping, as we don't
# want to add data to the integration context (which will ruin the logic of the cache object)
last_run_data.update({DUMMY: DUMMY})
if len(incidents) == 0:
next_run = get_next_start_time(last_run_time, now, False)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Next run time with no incidents found: {}.'.format(next_run))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
}
else:
if len(last_run_fetched_ids) + FETCH_LIMIT >= batch_size:
# If we almost saw all the events return from the query, we should increase the batch size to reach
# the new events.
batch_size += default_batch_size
latest_incident_fetched_time = get_latest_incident_time(incidents)
next_run = get_next_start_time(latest_incident_fetched_time, now, were_new_incidents_found=True)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - '
'Next run time with too many incidents: {}. Batch size: {}'.format(next_run, batch_size))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
'batch_size': batch_size
}
last_run_data.update(new_last_run)
demisto.setLastRun(last_run_data)
|
def fetch_notables(service, cache_object=None, enrich_notables=False):
last_run_data = demisto.getLastRun()
if not last_run_data:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease first run')
last_run_time = last_run_data and 'time' in last_run_data and last_run_data['time']
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run is:\n {}'.format(last_run_data))
dem_params = demisto.params()
occurred_look_behind = int(dem_params.get('occurrence_look_behind', 15) or 15)
extensive_log('[SplunkPyPreRelease] occurrence look behind is: {}'.format(occurred_look_behind))
occured_start_time, now = get_fetch_start_times(dem_params, service, last_run_time, occurred_look_behind)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run time: {}, now: {}'.format(last_run_time, now))
default_batch_size = int(dem_params.get('batch_size', 200))
batch_size = last_run_data.get('batch_size') or default_batch_size
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease batch size is : {}'.format(batch_size))
kwargs_oneshot = build_fetch_kwargs(dem_params, batch_size, occured_start_time, now)
fetch_query = build_fetch_query(dem_params)
oneshotsearch_results = service.jobs.oneshot(fetch_query, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
last_run_fetched_ids = last_run_data.get('found_incidents_ids', {})
incidents = [] # type: List[Dict]
notables = []
incident_ids_to_add = {}
for item in reader:
if len(incidents) >= FETCH_LIMIT:
break
extensive_log('[SplunkPyPreRelease] Incident data before parsing to notable: {}'.format(item))
notable_incident = Notable(data=item)
inc = notable_incident.to_incident()
extensive_log('[SplunkPyPreRelease] Incident data after parsing to notable: {}'.format(inc))
incident_id = create_incident_custom_id(inc)
if incident_id not in last_run_fetched_ids:
incident_ids_to_add[incident_id] = splunk_time_to_datetime(inc["occurred"]).strftime(SPLUNK_TIME_FORMAT)
# Save the occurrence time of each event in datetime format
incidents.append(inc)
notables.append(notable_incident)
else:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Dropped incident {} due to duplication.'.format(incident_id))
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids before adding new IDs: {}'.format(len(last_run_fetched_ids)))
for incident_id in incident_ids_to_add:
last_run_fetched_ids[incident_id] = incident_ids_to_add[incident_id]
# Adding the new incidents with the occurrence time.
extensive_log(
'[SplunkPyPreRelease] Size of last_run_fetched_ids after adding new IDs: {}'.format(len(last_run_fetched_ids)))
last_run_fetched_ids = remove_old_incident_ids(last_run_fetched_ids, occured_start_time)
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids after '
'removing old IDs: {}'.format(len(last_run_fetched_ids)))
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - incidents fetched on last run = {}'.format(last_run_fetched_ids))
debug_message = 'SplunkPyPreRelease - total number of incidents found: from {}\n to {}\n with the ' \
'query: {} is: {}.'.format(last_run_time, now, fetch_query, len(incidents))
extensive_log(debug_message)
if not enrich_notables:
demisto.incidents(incidents)
else:
cache_object.not_yet_submitted_notables += notables
if DUMMY not in last_run_data:
# we add dummy data to the last run to differentiate between the fetch-incidents triggered to the
# fetch-incidents running as part of "Pull from instance" in Classification & Mapping, as we don't
# want to add data to the integration context (which will ruin the logic of the cache object)
last_run_data.update({DUMMY: DUMMY})
if len(incidents) == 0:
next_run = get_next_start_time(last_run_time, now, False)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Next run time with no incidents found: {}.'.format(next_run))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
}
else:
if len(last_run_fetched_ids) + FETCH_LIMIT >= batch_size:
# If we almost saw all the events return from the query, we should increase the batch size to reach
# the new events.
batch_size += default_batch_size
latest_incident_fetched_time = get_latest_incident_time(incidents)
next_run = get_next_start_time(latest_incident_fetched_time, now, were_new_incidents_found=True)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - '
'Next run time with too many incidents: {}. Batch size: {}'.format(next_run, batch_size))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
'next_batch_size': batch_size
}
last_run_data.update(new_last_run)
demisto.setLastRun(last_run_data)
|
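A toy run of the batch-size growth rule near the end of the function, with made-up numbers; only the arithmetic is shown, not the Splunk or Demisto plumbing.
FETCH_LIMIT = 200            # hypothetical values for illustration
default_batch_size = 200
batch_size = 200
last_run_fetched_ids = {f"id-{i}": "2020-01-01T00:00:00" for i in range(150)}

if len(last_run_fetched_ids) + FETCH_LIMIT >= batch_size:
    # Almost all events returned by the query were already seen, so widen the next query.
    batch_size += default_batch_size
print(batch_size)  # 400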
8,858 |
def test_find_rule_from_callable(mockbot):
# prepare callable
@module.find(r'hello', r'hi', r'hey', r'hello|hi')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
handler.plugin_name = 'testplugin'
# create rule from a clean callable
rule = rules.FindRule.from_callable(mockbot.settings, handler)
assert str(rule) == '<FindRule testplugin.handler (4)>'
# match on "Hello" twice
line = ':[email protected] PRIVMSG #sopel :Hello, world'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'Hello' for result in results)
# match on "hi" twice
line = ':[email protected] PRIVMSG #sopel :hi!'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hi' for result in results)
# match on "hey" twice
line = ':[email protected] PRIVMSG #sopel :hey how are you doing?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 rule must match'
assert results[0].group(0) == 'hey'
# match on "hey" twice because it's twice in the line
line = ':[email protected] PRIVMSG #sopel :I say hey, can you say hey?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hey' for result in results)
|
def test_find_rule_from_callable(mockbot):
# prepare callable
@module.find(r'hello', r'hi', r'hey', r'hello|hi')
def handler(wrapped, trigger):
wrapped.reply('Hi!')
loader.clean_callable(handler, mockbot.settings)
handler.plugin_name = 'testplugin'
# create rule from a cleaned callable
rule = rules.FindRule.from_callable(mockbot.settings, handler)
assert str(rule) == '<FindRule testplugin.handler (4)>'
# match on "Hello" twice
line = ':[email protected] PRIVMSG #sopel :Hello, world'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'Hello' for result in results)
# match on "hi" twice
line = ':[email protected] PRIVMSG #sopel :hi!'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hi' for result in results)
# match on "hey" twice
line = ':[email protected] PRIVMSG #sopel :hey how are you doing?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 1, 'Exactly 1 rule must match'
assert results[0].group(0) == 'hey'
# match on "hey" twice because it's twice in the line
line = ':[email protected] PRIVMSG #sopel :I say hey, can you say hey?'
pretrigger = trigger.PreTrigger(mockbot.nick, line)
results = list(rule.match(mockbot, pretrigger))
assert len(results) == 2, 'Exactly 2 rules must match'
assert all(result.group(0) == 'hey' for result in results)
|
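The double match on the last line mirrors standard re.finditer semantics, shown here without any Sopel machinery:
import re

line = 'I say hey, can you say hey?'
matches = list(re.finditer(r'hey', line))
print(len(matches))                   # 2
print([m.group(0) for m in matches])  # ['hey', 'hey']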
11,800 |
def darker(image1, image2):
"""
Compares the two images, pixel by pixel, and returns a new image containing
the darker values. Note that at least one of the images must have mode "1".
.. code-block:: python
out = min(image1, image2)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
|
def darker(image1, image2):
"""
Compares the two images, pixel by pixel, and returns a new image containing
the darker values. At least one of the images must have mode "1".
.. code-block:: python
out = min(image1, image2)
:rtype: :py:class:`~PIL.Image.Image`
"""
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
|
36,466 |
def _aix_bosmp64():
# type: () -> List[str]
"""
The fileset bos.mp64 is the AIX kernel. It's VRMF and builddate
reflect the current levels of the runtime environment.
"""
tmp = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.mp64"])
tmp = tmp.decode("utf-8").strip().split(":") # type: ignore
# lpp, vrmf, bd = list(tmp[index] for index in [0, 2, -1]) # type: ignore
# e.g., ['bos.mp64', '7.1.4.34', '1806']
return list(tmp[index] for index in [0, 2, -1])
|
def _aix_bosmp64():
# type: () -> List[str]
"""
The fileset bos.mp64 is the AIX kernel. It's VRMF and builddate
reflect the current levels of the runtime environment.
"""
    lslpp_output = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.mp64"], text=True)
    tmp = lslpp_output.strip().split(":")  # type: ignore
# lpp, vrmf, bd = list(tmp[index] for index in [0, 2, -1]) # type: ignore
# e.g., ['bos.mp64', '7.1.4.34', '1806']
return list(tmp[index] for index in [0, 2, -1])
|
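Since lslpp only exists on AIX, the field extraction can be checked against a hard-coded sample line instead; the sample below is hypothetical and only assumed to be colon-separated with the VRMF in field 2 and the build date last.
sample = "bos.mp64:bos.mp64:7.1.4.34:C:F:Base OS 64-bit MP Runtime:::::1806"  # made-up line
fields = sample.strip().split(":")
lpp, vrmf, builddate = (fields[i] for i in (0, 2, -1))
print([lpp, vrmf, builddate])  # ['bos.mp64', '7.1.4.34', '1806']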
20,320 |
def get_pypath():
import sysconfig
pypath = sysconfig.get_path('purelib', vars={'base': ''}).replace('dist-packages', 'site-packages')
# Ensure that / is the path separator and not \, then strip /
# Starting with Python 3.10 the Debian installation returns paths like
# '/usr/local', even though it does the installation to just ''/usr'.
if pypath.startswith('/local'):
pypath = pypath.split('/', 2)[-1]
return Path(pypath).as_posix().strip('/')
|
def get_pypath():
import sysconfig
pypath = sysconfig.get_path('purelib', vars={'base': ''}).replace('dist-packages', 'site-packages')
# Ensure that / is the path separator and not \, then strip /
# Starting with Python 3.10 the Debian installation returns paths like
# '/usr/local', even though it does the installation to just '/usr'.
if pypath.startswith('/local'):
pypath = pypath.split('/', 2)[-1]
return Path(pypath).as_posix().strip('/')
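# A minimal, hedged usage sketch: Path is imported here because get_pypath uses
# it; on a typical Linux CPython the result looks like
# 'lib/python3.11/site-packages' (the exact value depends on the interpreter).
from pathlib import Path
print(get_pypath())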
|
25,750 |
def expand_series(ser, columns):
"""
Helper function to fastly expand a series to a dataframe with according
column axis and every single column being the equal to the given series.
"""
return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)
|
def expand_series(ser, columns):
"""
    Helper function to quickly expand a series to a dataframe with the given
    column axis, where every column is equal to the given series.
"""
return ser.to_frame(columns[0]).reindex(columns=columns).ffill(axis=1)
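# A minimal usage sketch (hedged): with pandas imported as pd, every requested
# column of the result carries the same values as the input series.
import pandas as pd
ser = pd.Series([1.0, 2.0], index=["a", "b"])
df = expand_series(ser, ["x", "y", "z"])
# df:
#      x    y    z
# a  1.0  1.0  1.0
# b  2.0  2.0  2.0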
|
32,254 |
def update_remote_system_command(client: Client, args: Dict[str, Any], params: Dict[str, Any]) -> str:
"""update-remote-system command: pushes local changes to the remote system
:type client: ``Client``
:param client: XSOAR client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['data']`` the data to send to the remote system
``args['entries']`` the entries to send to the remote system
``args['incidentChanged']`` boolean telling us if the local incident indeed changed or not
``args['remoteId']`` the remote incident id
:return:
``str`` containing the remote incident id - really important if the incident is newly created remotely
:rtype: ``str``
"""
parsed_args = UpdateRemoteSystemArgs(args)
ticket_id = parsed_args.remote_incident_id
if parsed_args.delta:
demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}')
demisto.debug(f'Sending incident with remote ID [{parsed_args.remote_incident_id}] to remote system\n')
new_incident_id: str = parsed_args.remote_incident_id
updated_incident = {}
if not parsed_args.remote_incident_id or parsed_args.incident_changed:
if parsed_args.remote_incident_id:
old_incident = client.get_ticket(parsed_args.remote_incident_id)
for changed_key in parsed_args.delta.keys():
old_incident[changed_key] = parsed_args.delta[changed_key] # type: ignore
parsed_args.data = old_incident
else:
parsed_args.data['createInvestigation'] = True
updated_incident = client.update_ticket(parsed_args.data)
else:
demisto.debug(f'Skipping updating remote incident fields [{parsed_args.remote_incident_id}] as it is '
f'not new nor changed.')
# Close incident if relevant
if updated_incident and parsed_args.inc_status == IncidentStatus.DONE:
demisto.debug(f'Closing remote incident {ticket_id}')
client.close_ticket(ticket_id)
entries = parsed_args.entries
if entries:
demisto.debug(f'New entries {entries}')
for entry in entries:
demisto.debug(f'Sending entry {entry.get("id")}, type: {entry.get("type")}')
# Mirroring files as entries
if entry.get('type') == 3:
path_res = demisto.getFilePath(entry.get('id'))
demisto.debug('path res' + str(path_res))
full_file_name = path_res.get('name')
file_name, file_extension = os.path.splitext(full_file_name)
if not file_extension:
file_extension = ''
up = client.upload_document(file_name + '_mirrored_from_xsoar' + file_extension, path_res.get('path'))
client.link_document_to_ticket(up['id'], ticket_id)
else:
# Mirroring comment and work notes as entries
user = entry.get('user', 'dbot') or 'dbot'
text = f"({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"
client.add_comment(ticket_id, text)
return new_incident_id
|
def update_remote_system_command(client: Client, args: Dict[str, Any]) -> str:
"""update-remote-system command: pushes local changes to the remote system
:type client: ``Client``
:param client: XSOAR client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['data']`` the data to send to the remote system
``args['entries']`` the entries to send to the remote system
``args['incidentChanged']`` boolean telling us if the local incident indeed changed or not
``args['remoteId']`` the remote incident id
:return:
``str`` containing the remote incident id - really important if the incident is newly created remotely
:rtype: ``str``
"""
parsed_args = UpdateRemoteSystemArgs(args)
ticket_id = parsed_args.remote_incident_id
if parsed_args.delta:
demisto.debug(f'Got the following delta keys {str(list(parsed_args.delta.keys()))}')
demisto.debug(f'Sending incident with remote ID [{parsed_args.remote_incident_id}] to remote system\n')
new_incident_id: str = parsed_args.remote_incident_id
updated_incident = {}
if not parsed_args.remote_incident_id or parsed_args.incident_changed:
if parsed_args.remote_incident_id:
old_incident = client.get_ticket(parsed_args.remote_incident_id)
for changed_key in parsed_args.delta.keys():
old_incident[changed_key] = parsed_args.delta[changed_key] # type: ignore
parsed_args.data = old_incident
else:
parsed_args.data['createInvestigation'] = True
updated_incident = client.update_ticket(parsed_args.data)
else:
demisto.debug(f'Skipping updating remote incident fields [{parsed_args.remote_incident_id}] as it is '
f'not new nor changed.')
# Close incident if relevant
if updated_incident and parsed_args.inc_status == IncidentStatus.DONE:
demisto.debug(f'Closing remote incident {ticket_id}')
client.close_ticket(ticket_id)
entries = parsed_args.entries
if entries:
demisto.debug(f'New entries {entries}')
for entry in entries:
demisto.debug(f'Sending entry {entry.get("id")}, type: {entry.get("type")}')
# Mirroring files as entries
if entry.get('type') == 3:
path_res = demisto.getFilePath(entry.get('id'))
demisto.debug('path res' + str(path_res))
full_file_name = path_res.get('name')
file_name, file_extension = os.path.splitext(full_file_name)
if not file_extension:
file_extension = ''
up = client.upload_document(file_name + '_mirrored_from_xsoar' + file_extension, path_res.get('path'))
client.link_document_to_ticket(up['id'], ticket_id)
else:
# Mirroring comment and work notes as entries
user = entry.get('user', 'dbot') or 'dbot'
text = f"({user}): {str(entry.get('contents', ''))}\n\n Mirrored from Cortex XSOAR"
client.add_comment(ticket_id, text)
return new_incident_id
|
42,061 |
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
f'{kurobako_cmd} problem nasbench "{dataset}"'
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_fn} | {kurobako_cmd} run --parallelism {args.n_jobs} "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir}"
)
subprocess.run(cmd, shell=True)
|
def run(args: argparse.Namespace) -> None:
kurobako_cmd = os.path.join(args.path_to_kurobako, "kurobako")
subprocess.run(f"{kurobako_cmd} --version", shell=True)
if not (os.path.exists(args.data_dir) and os.path.isdir(args.data_dir)):
raise ValueError(f"Data directory {args.data_dir} cannot be found.")
os.makedirs(args.out_dir, exist_ok=True)
study_json_fn = os.path.join(args.out_dir, "studies.json")
subprocess.check_call(f"echo >| {study_json_fn}", shell=True)
solvers_filename = os.path.join(args.out_dir, "solvers.json")
subprocess.check_call(f"echo >| {solvers_filename}", shell=True)
problems_filename = os.path.join(args.out_dir, "problems.json")
subprocess.check_call(f"echo >| {problems_filename}", shell=True)
# Create ZDT problems
cmd = f"{kurobako_cmd} problem-suite zdt | tee -a {problems_filename}"
subprocess.run(cmd, shell=True)
# Create NAS bench problem(C) (for Multi-Objective Settings).
dataset = os.path.join(args.data_dir, "nasbench_full.bin")
cmd = (
        f'{kurobako_cmd} problem nasbench "{dataset}" '
f"--encoding C --metrics accuracy params | tee -a {problems_filename}"
)
subprocess.run(cmd, shell=True)
# Create solvers.
sampler_list = args.sampler_list.split()
sampler_kwargs_list = args.sampler_kwargs_list.split()
if len(sampler_list) != len(sampler_kwargs_list):
raise ValueError(
"The number of samplers does not match the given keyword arguments. \n"
f"sampler_list: {sampler_list}, sampler_kwargs_list: {sampler_kwargs_list}."
)
for sampler, sampler_kwargs in zip(sampler_list, sampler_kwargs_list):
name = f"{args.name_prefix}_{sampler}"
python_command = f"mo_runner.py {sampler} {sampler_kwargs}"
cmd = (
f"{kurobako_cmd} solver --name {name} command python {python_command}"
f"| tee -a {solvers_filename}"
)
subprocess.run(cmd, shell=True)
# Create study.
cmd = (
f"{kurobako_cmd} studies --budget 1000 "
f"--solvers $(cat {solvers_filename}) --problems $(cat {problems_filename}) "
f"--repeats {args.n_runs} --seed {args.seed} "
f"> {study_json_fn}"
)
subprocess.run(cmd, shell=True)
result_filename = os.path.join(args.out_dir, "results.json")
cmd = (
f"cat {study_json_filename} | {kurobako_cmd} run --parallelism {args.n_jobs} -q "
f"> {result_filename}"
)
subprocess.run(cmd, shell=True)
# Report
report_filename = os.path.join(args.out_dir, "report.md")
cmd = f"cat {result_filename} | {kurobako_cmd} report > {report_filename}"
subprocess.run(cmd, shell=True)
# Plot pareto-front.
problem_names = ["NASBench", "ZDT1", "ZDT2", "ZDT3", "ZDT4", "ZDT5", "ZDT6"]
for problem_name in problem_names:
cmd = (
f"cat {result_filename} | grep {problem_name} | "
f"{kurobako_cmd} plot pareto-front -o {args.out_dir}"
)
subprocess.run(cmd, shell=True)
|
54,961 |
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
.. math:
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right..
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = [1 if i < n_electrons else 0 for i in range(m_spin_orbitals)]
return np.array(hf_state_on)
|
def hf_state(n_electrons, m_spin_orbitals):
r"""Generates the occupation-number vector representing the Hartree-Fock (HF)
state of :math:`N` electrons in a basis of :math:`M` spin orbitals.
The many-particle wave function in the HF approximation is a `Slater determinant
<https://en.wikipedia.org/wiki/Slater_determinant>`_. In Fock space, a Slater determinant
is represented by the occupation-number vector:
.. math:
\vert {\bf n} \rangle = \vert n_1, n_2, \dots, n_M \rangle,
n_i = \left\lbrace \begin{array}{ll} 1 & i \leq N \\ 0 & i > N \end{array} \right.
**Example**
>>> init_state = hf_state(2, 6)
>>> print(init_state)
[1 1 0 0 0 0]
Args:
n_electrons (int): number of active electrons
m_spin_orbitals (int): number of active **spin-orbitals**
Returns:
array: NumPy array containing the vector :math:`\vert {\bf n} \rangle`
"""
if not n_electrons > 0:
raise ValueError(
"The number of active electrons has to be > 0; got 'n_electrons' = {}"
.format(n_electrons)
)
if n_electrons > m_spin_orbitals:
raise ValueError(
"The number of active orbitals has to be >= the number of active electrons;"
" got 'm_spin_orbitals'={} < 'n_electrons'={}".format(m_spin_orbitals, n_electrons)
)
hf_state_on = [1 if i < n_electrons else 0 for i in range(m_spin_orbitals)]
return np.array(hf_state_on)
|
32,053 |
def list_issue_comments_command():
args = demisto.args()
issue_number = args.get('issue_number')
since_date = args.get('since', None)
response = list_issue_comments(issue_number, since_date)
ec_object = [format_comment_outputs(comment, issue_number) for comment in response]
ec = {
'GitHub.Comment(val.IssueNumber === obj.IssueNumber && val.ID === obj.ID)': ec_object
}
human_readable = tableToMarkdown(f'Comments for Issue #{issue_number}', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
def list_issue_comments_command():
args = demisto.args()
issue_number = args.get('issue_number')
since_date = args.get('since')
response = list_issue_comments(issue_number, since_date)
ec_object = [format_comment_outputs(comment, issue_number) for comment in response]
ec = {
'GitHub.Comment(val.IssueNumber === obj.IssueNumber && val.ID === obj.ID)': ec_object
}
human_readable = tableToMarkdown(f'Comments for Issue #{issue_number}', ec_object, removeNull=True)
return_outputs(readable_output=human_readable, outputs=ec, raw_response=response)
|
58,031 |
def output_format(res, output_type=None, readable=None):
if res:
if isinstance(res, list):
keys = res[0].keys()
else:
keys = res.keys()
key_list = []
for key in keys:
key_list.append(key)
if not output_type:
output_type = key_list[0].split(".")[0]
result = []
if not readable:
readable = output_type
result.append(CommandResults(outputs_prefix='OPNSense.' + output_type,
outputs_key_field=key_list,
outputs=res,
raw_response=res,
readable_output=tableToMarkdown(name='OPNSense ' + readable, t=res, headers=key_list)))
return result
else:
return "No result"
|
def output_format(res, output_type=None, readable=None):
if res:
if isinstance(res, list):
key_list = list(res[0].keys())
else:
key_list = list(res.keys())
if not output_type:
output_type = key_list[0].split(".")[0]
result = []
if not readable:
readable = output_type
result.append(CommandResults(outputs_prefix='OPNSense.' + output_type,
outputs_key_field=key_list,
outputs=res,
raw_response=res,
readable_output=tableToMarkdown(name='OPNSense ' + readable, t=res, headers=key_list)))
return result
else:
return "No result"
|
10,909 |
def run_in_venv(cmd, venv_path, action_desc):
"""Run the givven command in the virtualenv at the given path"""
cmd = 'source %s/bin/activate && %s' % (venv_path, cmd)
return run_cmd(cmd, action_desc, shell=True, executable='/bin/bash')
|
def run_in_venv(cmd, venv_path, action_desc):
"""Run the given command in the virtualenv at the given path"""
cmd = 'source %s/bin/activate && %s' % (venv_path, cmd)
return run_cmd(cmd, action_desc, shell=True, executable='/bin/bash')
|
28,345 |
def many_many(curr: sqlite3.Cursor, *columns: str) -> list[tuple[Any, ...]]:
"""Get all values of many columns
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of lists of values
"""
res = curr.fetchall()
if _need_to_select(curr, *columns):
raise RuntimeError("Expected consistent selection")
return res
|
def many_many(curr: sqlite3.Cursor, *columns: str) -> list[tuple[Any, ...]]:
"""Get all values of many columns
Args:
curr: cursor to operate on
columns: names of the columns
Returns:
list of lists of values
"""
res = curr.fetchall()
if _need_to_select(curr, *columns):
raise RuntimeError("Expected consistent selection: cursor has columns {tuple(c[0] for c in curr.description)} but expected {columns}")
return res
|
20,258 |
def create_landing_page(page_title, page_slug, parent_path=None, \
has_email_signup=False, email_gd_code="USCFPB_000"):
# create a new page and set it as the child of an existing page
# return list of route paths
# get the root of the current site
site_model = apps.get_model('wagtailcore', 'Site')
site = site_model.objects.get(is_default_site=True)
root = site.root_page
# since parent was not provided, make root
parent = root
# if a parent path is provided, use that as parent
if parent_path:
path_components = \
[component for component in parent_path.split('/') if component]
try:
route = root.route(None, path_components)
except Http404:
print("skipping page creation")
parent = route.page
# create page, add it as a child of parent, save, and publish
new_page = LandingPage(title=page_title, slug=page_slug)
# update sidefoot streamfield if required
if has_email_signup:
new_page.sidefoot=json.dumps([
{'type':'email_signup', 'value':{'gd_code': email_gd_code}}
])
try:
parent.add_child(instance=new_page)
new_page.save_revision().publish()
except ValidationError:
print("skipping page creation")
# return path
return new_page.get_url(None, site)
|
def create_landing_page(page_title, page_slug, parent_path=None,
has_email_signup=False, email_gd_code="USCFPB_000"):
# create a new page and set it as the child of an existing page
# return list of route paths
# get the root of the current site
site_model = apps.get_model('wagtailcore', 'Site')
site = site_model.objects.get(is_default_site=True)
root = site.root_page
# since parent was not provided, make root
parent = root
# if a parent path is provided, use that as parent
if parent_path:
path_components = \
[component for component in parent_path.split('/') if component]
try:
route = root.route(None, path_components)
except Http404:
print("skipping page creation")
parent = route.page
# create page, add it as a child of parent, save, and publish
new_page = LandingPage(title=page_title, slug=page_slug)
# update sidefoot streamfield if required
if has_email_signup:
new_page.sidefoot=json.dumps([
{'type':'email_signup', 'value':{'gd_code': email_gd_code}}
])
try:
parent.add_child(instance=new_page)
new_page.save_revision().publish()
except ValidationError:
print("skipping page creation")
# return path
return new_page.get_url(None, site)
|
52,285 |
def get_parser():
parser = SCTArgumentParser(
description='Compute SNR using methods described in [Dietrich et al., Measurement of'
' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel '
'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-i',
required=True,
help='4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz',
metavar=Metavar.file)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
'-m',
help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz',
metavar=Metavar.file,
default='')
optional.add_argument(
'-method',
help='R|Method to use to compute the SNR:\n'
' diff (default): Substract two volumes (defined by -vol) and estimate noise variance within the ROI (flag -m is required).\n'
' mult: Estimate noise variance over time across volumes specified with -vol.',
choices=('diff', 'mult'),
default='diff')
optional.add_argument(
'-vol',
help='Volumes to compute SNR from. Separate with "," (Example: -vol 0,1), or select range '
'using ":" (Example: -vol 2:50). By default, all volumes in series are selected.',
metavar=Metavar.str,
default='')
optional.add_argument(
'-r',
type=int,
help='Remove temporary files.',
default=1,
choices=(0, 1))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
#Add optional argument for saving into Text file
optional.add_argument(
'-o',
type=str,
default=None
)
return parser
|
def get_parser():
parser = SCTArgumentParser(
description='Compute SNR using methods described in [Dietrich et al., Measurement of'
' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel '
'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].'
)
mandatoryArguments = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatoryArguments.add_argument(
'-i',
required=True,
help='4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz',
metavar=Metavar.file)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
'-m',
help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz',
metavar=Metavar.file,
default='')
optional.add_argument(
'-method',
help='R|Method to use to compute the SNR:\n'
             ' diff (default): Subtract two volumes (defined by -vol) and estimate noise variance within the ROI (flag -m is required).\n'
' mult: Estimate noise variance over time across volumes specified with -vol.',
choices=('diff', 'mult'),
default='diff')
optional.add_argument(
'-vol',
help='Volumes to compute SNR from. Separate with "," (Example: -vol 0,1), or select range '
'using ":" (Example: -vol 2:50). By default, all volumes in series are selected.',
metavar=Metavar.str,
default='')
optional.add_argument(
'-r',
type=int,
help='Remove temporary files.',
default=1,
choices=(0, 1))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
optional.add_argument(
'-o',
metavar=Metavar.str,
type=str,
default=None,
help="File name where to write the computed SNR."
)
return parser
|
42,477 |
def analyze_videos(
config,
videos,
videotype="",
shuffle=1,
trainingsetindex=0,
gputouse=None,
save_as_csv=False,
destfolder=None,
batchsize=None,
cropping=None,
TFGPUinference=True,
dynamic=(False, 0.5, 10),
modelprefix="",
robust_nframes=False,
allow_growth=False,
use_shelve=False,
auto_track=True,
n_tracks=None,
calibrate=False,
identity_only=False,
use_openvino="CPU" if is_openvino_available else None,
):
"""Makes prediction based on a trained network.
The index of the trained network is specified by parameters in the config file
(in particular the variable 'snapshotindex').
Parameters
----------
config: str
Full path of the config.yaml file.
videos: list[str]
A list of strings containing the full paths to videos for analysis or a path to
the directory, where all the videos with same extension are stored.
videotype: str, optional, default=""
Checks for the extension of the video in case the input to the video is a
directory. Only videos with this extension are analyzed. If left unspecified,
videos with common extensions ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.
shuffle: int, optional, default=1
An integer specifying the shuffle index of the training dataset used for
training the network.
trainingsetindex: int, optional, default=0
Integer specifying which TrainingsetFraction to use.
By default the first (note that TrainingFraction is a list in config.yaml).
gputouse: int or None, optional, default=None
Indicates the GPU to use (see number in ``nvidia-smi``). If you do not have a
GPU put ``None``.
See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries
save_as_csv: bool, optional, default=False
Saves the predictions in a .csv file.
destfolder: string or None, optional, default=None
Specifies the destination folder for analysis data. If ``None``, the path of
the video is used. Note that for subsequent analysis this folder also needs to
be passed.
batchsize: int or None, optional, default=None
Change batch size for inference; if given overwrites value in ``pose_cfg.yaml``.
cropping: list or None, optional, default=None
List of cropping coordinates as [x1, x2, y1, y2].
Note that the same cropping parameters will then be used for all videos.
If different video crops are desired, run ``analyze_videos`` on individual
videos with the corresponding cropping coordinates.
TFGPUinference: bool, optional, default=True
Perform inference on GPU with TensorFlow code. Introduced in "Pretraining
boosts out-of-domain robustness for pose estimation" by Alexander Mathis,
Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis.
Source: https://arxiv.org/abs/1909.11229
dynamic: tuple(bool, float, int) triple containing (state, detectiontreshold, margin)
If the state is true, then dynamic cropping will be performed. That means that if an object is detected (i.e. any body part > detectiontreshold),
then object boundaries are computed according to the smallest/largest x position and smallest/largest y position of all body parts. This window is
expanded by the margin and from then on only the posture within this crop is analyzed (until the object is lost, i.e. <detectiontreshold). The
current position is utilized for updating the crop window for the next frame (this is why the margin is important and should be set large
enough given the movement of the animal).
modelprefix: str, optional, default=""
Directory containing the deeplabcut models to use when evaluating the network.
By default, the models are assumed to exist in the project folder.
robust_nframes: bool, optional, default=False
Evaluate a video's number of frames in a robust manner.
This option is slower (as the whole video is read frame-by-frame),
but does not rely on metadata, hence its robustness against file corruption.
allow_growth: bool, optional, default=False.
For some smaller GPUs the memory issues happen. If ``True``, the memory
allocator does not pre-allocate the entire specified GPU memory region, instead
starting small and growing as needed.
See issue: https://forum.image.sc/t/how-to-stop-running-out-of-vram/30551/2
use_shelve: bool, optional, default=False
By default, data are dumped in a pickle file at the end of the video analysis.
Otherwise, data are written to disk on the fly using a "shelf"; i.e., a
pickle-based, persistent, database-like object by default, resulting in
constant memory footprint.
The following parameters are only relevant for multi-animal projects:
auto_track: bool, optional, default=True
By default, tracking and stitching are automatically performed, producing the
final h5 data file. This is equivalent to the behavior for single-animal
projects.
If ``False``, one must run ``convert_detections2tracklets`` and
``stitch_tracklets`` afterwards, in order to obtain the h5 file.
This function has 3 related sub-calls:
identity_only: bool, optional, default=False
If ``True`` and animal identity was learned by the model, assembly and tracking
rely exclusively on identity prediction.
calibrate: bool, optional, default=False
If ``True``, use training data to calibrate the animal assembly procedure. This
improves its robustness to wrong body part links, but requires very little
missing data.
n_tracks: int or None, optional, default=None
Number of tracks to reconstruct. By default, taken as the number of individuals
defined in the config.yaml. Another number can be passed if the number of
animals in the video is different from the number of animals the model was
trained on.
use_openvino: str, optional
Use "CPU" for inference if OpenVINO is available in the Python environment.
Returns
-------
pandas array
The labels are stored as MultiIndex Pandas Array, which contains the name of
the network, body part name, (x, y) label position in pixels, and the
likelihood for each frame per body part. These arrays are stored in an
efficient Hierarchical Data Format (HDF) in the same directory, where the video
is stored. However, if the flag save_as_csv is set to True, the data can also
be exported in comma-separated values format (.csv), which in turn can be
imported in many programs, such as MATLAB, R, Prism, etc.
Examples
--------
Analysing a single video on Windows
>>> deeplabcut.analyze_videos(
'C:\\myproject\\reaching-task\\config.yaml',
['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'],
)
Analyzing a single video on Linux/MacOS
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
['/analysis/project/videos/reachingvideo1.avi'],
)
Analyze all videos of type ``avi`` in a folder
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
['/analysis/project/videos'],
videotype='.avi',
)
Analyze multiple videos
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
)
Analyze multiple videos with ``shuffle=2``
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
shuffle=2,
)
Analyze multiple videos with ``shuffle=2``, save results as an additional csv file
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
shuffle=2,
save_as_csv=True,
)
"""
if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
del os.environ["TF_CUDNN_USE_AUTOTUNE"] # was potentially set during training
if gputouse is not None: # gpu selection
os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)
tf.compat.v1.reset_default_graph()
start_path = os.getcwd() # record cwd to return to this directory in the end
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg["TrainingFraction"][trainingsetindex]
iteration = cfg["iteration"]
if cropping is not None:
cfg["cropping"] = True
cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"] = cropping
print("Overwriting cropping parameters:", cropping)
print("These are used for all videos, but won't be save to the cfg file.")
modelfolder = os.path.join(
cfg["project_path"],
str(
auxiliaryfunctions.get_model_folder(
trainFraction, shuffle, cfg, modelprefix=modelprefix
)
),
)
path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
try:
dlc_cfg = load_config(str(path_test_config))
except FileNotFoundError:
raise FileNotFoundError(
"It seems the model for iteration %s and shuffle %s and trainFraction %s does not exist."
% (iteration, shuffle, trainFraction)
)
# Check which snapshots are available and sort them by # iterations
try:
Snapshots = np.array(
[
fn.split(".")[0]
for fn in os.listdir(os.path.join(modelfolder, "train"))
if "index" in fn
]
)
except FileNotFoundError:
raise FileNotFoundError(
"Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Be sure you also have the intended iteration number set.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s."
% (shuffle, shuffle)
)
if cfg["snapshotindex"] == "all":
print(
"Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best the snapshot. For now, changing snapshot index to -1!"
)
snapshotindex = -1
else:
snapshotindex = cfg["snapshotindex"]
increasing_indices = np.argsort([int(m.split("-")[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]
print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)
##################################################
# Load and setup CNN part detector
##################################################
# Check if data already was generated:
dlc_cfg["init_weights"] = os.path.join(
modelfolder, "train", Snapshots[snapshotindex]
)
trainingsiterations = (dlc_cfg["init_weights"].split(os.sep)[-1]).split("-")[-1]
# Update number of output and batchsize
dlc_cfg["num_outputs"] = cfg.get("num_outputs", dlc_cfg.get("num_outputs", 1))
if batchsize == None:
# update batchsize (based on parameters in config.yaml)
dlc_cfg["batch_size"] = cfg["batch_size"]
else:
dlc_cfg["batch_size"] = batchsize
cfg["batch_size"] = batchsize
if "multi-animal" in dlc_cfg["dataset_type"]:
dynamic = (False, 0.5, 10) # setting dynamic mode to false
TFGPUinference = False
if dynamic[0]: # state=true
# (state,detectiontreshold,margin)=dynamic
print("Starting analysis in dynamic cropping mode with parameters:", dynamic)
dlc_cfg["num_outputs"] = 1
TFGPUinference = False
dlc_cfg["batch_size"] = 1
print(
"Switching batchsize to 1, num_outputs (per animal) to 1 and TFGPUinference to False (all these features are not supported in this mode)."
)
# Name for scorer:
DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
cfg,
shuffle,
trainFraction,
trainingsiterations=trainingsiterations,
modelprefix=modelprefix,
)
if dlc_cfg["num_outputs"] > 1:
if TFGPUinference:
print(
"Switching to numpy-based keypoint extraction code, as multiple point extraction is not supported by TF code currently."
)
TFGPUinference = False
print("Extracting ", dlc_cfg["num_outputs"], "instances per bodypart")
xyz_labs_orig = ["x", "y", "likelihood"]
suffix = [str(s + 1) for s in range(dlc_cfg["num_outputs"])]
suffix[0] = "" # first one has empty suffix for backwards compatibility
xyz_labs = [x + s for s in suffix for x in xyz_labs_orig]
else:
xyz_labs = ["x", "y", "likelihood"]
if use_openvino:
sess, inputs, outputs = predict.setup_openvino_pose_prediction(
dlc_cfg, device=use_openvino
)
elif TFGPUinference:
sess, inputs, outputs = predict.setup_GPUpose_prediction(
dlc_cfg, allow_growth=allow_growth
)
else:
sess, inputs, outputs = predict.setup_pose_prediction(
dlc_cfg, allow_growth=allow_growth
)
pdindex = pd.MultiIndex.from_product(
[[DLCscorer], dlc_cfg["all_joints_names"], xyz_labs],
names=["scorer", "bodyparts", "coords"],
)
##################################################
# Looping over videos
##################################################
Videos = auxiliaryfunctions.get_list_of_videos(videos, videotype)
if len(Videos) > 0:
if "multi-animal" in dlc_cfg["dataset_type"]:
from deeplabcut.pose_estimation_tensorflow.predict_multianimal import (
AnalyzeMultiAnimalVideo,
)
for video in Videos:
AnalyzeMultiAnimalVideo(
video,
DLCscorer,
trainFraction,
cfg,
dlc_cfg,
sess,
inputs,
outputs,
destfolder,
robust_nframes=robust_nframes,
use_shelve=use_shelve,
)
if auto_track: # tracker type is taken from default in cfg
convert_detections2tracklets(
config,
[video],
videotype,
shuffle,
trainingsetindex,
destfolder=destfolder,
modelprefix=modelprefix,
calibrate=calibrate,
identity_only=identity_only,
)
stitch_tracklets(
config,
[video],
videotype,
shuffle,
trainingsetindex,
destfolder=destfolder,
n_tracks=n_tracks,
modelprefix=modelprefix,
)
else:
for video in Videos:
DLCscorer = AnalyzeVideo(
video,
DLCscorer,
DLCscorerlegacy,
trainFraction,
cfg,
dlc_cfg,
sess,
inputs,
outputs,
pdindex,
save_as_csv,
destfolder,
TFGPUinference,
dynamic,
use_openvino,
)
os.chdir(str(start_path))
if "multi-animal" in dlc_cfg["dataset_type"]:
print(
"The videos are analyzed. Time to assemble animals and track 'em... \n Call 'create_video_with_all_detections' to check multi-animal detection quality before tracking."
)
print(
"If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract a few representative outlier frames."
)
else:
print(
"The videos are analyzed. Now your research can truly start! \n You can create labeled videos with 'create_labeled_video'"
)
print(
"If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract a few representative outlier frames."
)
return DLCscorer # note: this is either DLCscorer or DLCscorerlegacy depending on what was used!
else:
print("No video(s) were found. Please check your paths and/or 'video_type'.")
return DLCscorer
|
def analyze_videos(
config,
videos,
videotype="",
shuffle=1,
trainingsetindex=0,
gputouse=None,
save_as_csv=False,
destfolder=None,
batchsize=None,
cropping=None,
TFGPUinference=True,
dynamic=(False, 0.5, 10),
modelprefix="",
robust_nframes=False,
allow_growth=False,
use_shelve=False,
auto_track=True,
n_tracks=None,
calibrate=False,
identity_only=False,
use_openvino="CPU" if is_openvino_available else None,
):
"""Makes prediction based on a trained network.
The index of the trained network is specified by parameters in the config file
(in particular the variable 'snapshotindex').
Parameters
----------
config: str
Full path of the config.yaml file.
videos: list[str]
A list of strings containing the full paths to videos for analysis or a path to
the directory, where all the videos with same extension are stored.
videotype: str, optional, default=""
Checks for the extension of the video in case the input to the video is a
directory. Only videos with this extension are analyzed. If left unspecified,
videos with common extensions ('avi', 'mp4', 'mov', 'mpeg', 'mkv') are kept.
shuffle: int, optional, default=1
An integer specifying the shuffle index of the training dataset used for
training the network.
trainingsetindex: int, optional, default=0
Integer specifying which TrainingsetFraction to use.
By default the first (note that TrainingFraction is a list in config.yaml).
gputouse: int or None, optional, default=None
Indicates the GPU to use (see number in ``nvidia-smi``). If you do not have a
GPU put ``None``.
See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries
save_as_csv: bool, optional, default=False
Saves the predictions in a .csv file.
destfolder: string or None, optional, default=None
Specifies the destination folder for analysis data. If ``None``, the path of
the video is used. Note that for subsequent analysis this folder also needs to
be passed.
batchsize: int or None, optional, default=None
Change batch size for inference; if given overwrites value in ``pose_cfg.yaml``.
cropping: list or None, optional, default=None
List of cropping coordinates as [x1, x2, y1, y2].
Note that the same cropping parameters will then be used for all videos.
If different video crops are desired, run ``analyze_videos`` on individual
videos with the corresponding cropping coordinates.
TFGPUinference: bool, optional, default=True
Perform inference on GPU with TensorFlow code. Introduced in "Pretraining
boosts out-of-domain robustness for pose estimation" by Alexander Mathis,
Mert Yüksekgönül, Byron Rogers, Matthias Bethge, Mackenzie W. Mathis.
Source: https://arxiv.org/abs/1909.11229
dynamic: tuple(bool, float, int) triple containing (state, detectiontreshold, margin)
If the state is true, then dynamic cropping will be performed. That means that if an object is detected (i.e. any body part > detectiontreshold),
then object boundaries are computed according to the smallest/largest x position and smallest/largest y position of all body parts. This window is
expanded by the margin and from then on only the posture within this crop is analyzed (until the object is lost, i.e. <detectiontreshold). The
current position is utilized for updating the crop window for the next frame (this is why the margin is important and should be set large
enough given the movement of the animal).
modelprefix: str, optional, default=""
Directory containing the deeplabcut models to use when evaluating the network.
By default, the models are assumed to exist in the project folder.
robust_nframes: bool, optional, default=False
Evaluate a video's number of frames in a robust manner.
This option is slower (as the whole video is read frame-by-frame),
but does not rely on metadata, hence its robustness against file corruption.
allow_growth: bool, optional, default=False.
For some smaller GPUs the memory issues happen. If ``True``, the memory
allocator does not pre-allocate the entire specified GPU memory region, instead
starting small and growing as needed.
See issue: https://forum.image.sc/t/how-to-stop-running-out-of-vram/30551/2
use_shelve: bool, optional, default=False
By default, data are dumped in a pickle file at the end of the video analysis.
Otherwise, data are written to disk on the fly using a "shelf"; i.e., a
pickle-based, persistent, database-like object by default, resulting in
constant memory footprint.
The following parameters are only relevant for multi-animal projects:
auto_track: bool, optional, default=True
By default, tracking and stitching are automatically performed, producing the
final h5 data file. This is equivalent to the behavior for single-animal
projects.
If ``False``, one must run ``convert_detections2tracklets`` and
``stitch_tracklets`` afterwards, in order to obtain the h5 file.
This function has 3 related sub-calls:
identity_only: bool, optional, default=False
If ``True`` and animal identity was learned by the model, assembly and tracking
rely exclusively on identity prediction.
calibrate: bool, optional, default=False
If ``True``, use training data to calibrate the animal assembly procedure. This
improves its robustness to wrong body part links, but requires very little
missing data.
n_tracks: int or None, optional, default=None
Number of tracks to reconstruct. By default, taken as the number of individuals
defined in the config.yaml. Another number can be passed if the number of
animals in the video is different from the number of animals the model was
trained on.
use_openvino: str, optional
Use "CPU" for inference if OpenVINO is available in the Python environment.
Returns
-------
pandas array
The labels are stored as MultiIndex Pandas Array, which contains the name of
the network, body part name, (x, y) label position in pixels, and the
likelihood for each frame per body part. These arrays are stored in an
efficient Hierarchical Data Format (HDF) in the same directory, where the video
is stored. However, if the flag save_as_csv is set to True, the data can also
be exported in comma-separated values format (.csv), which in turn can be
imported in many programs, such as MATLAB, R, Prism, etc.
Examples
--------
Analyzing a single video on Windows
>>> deeplabcut.analyze_videos(
'C:\\myproject\\reaching-task\\config.yaml',
['C:\\yourusername\\rig-95\\Videos\\reachingvideo1.avi'],
)
Analyzing a single video on Linux/MacOS
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
['/analysis/project/videos/reachingvideo1.avi'],
)
Analyze all videos of type ``avi`` in a folder
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
['/analysis/project/videos'],
videotype='.avi',
)
Analyze multiple videos
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
)
Analyze multiple videos with ``shuffle=2``
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
shuffle=2,
)
Analyze multiple videos with ``shuffle=2``, save results as an additional csv file
>>> deeplabcut.analyze_videos(
'/analysis/project/reaching-task/config.yaml',
[
'/analysis/project/videos/reachingvideo1.avi',
'/analysis/project/videos/reachingvideo2.avi',
],
shuffle=2,
save_as_csv=True,
)
"""
if "TF_CUDNN_USE_AUTOTUNE" in os.environ:
del os.environ["TF_CUDNN_USE_AUTOTUNE"] # was potentially set during training
if gputouse is not None: # gpu selection
os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)
tf.compat.v1.reset_default_graph()
start_path = os.getcwd() # record cwd to return to this directory in the end
cfg = auxiliaryfunctions.read_config(config)
trainFraction = cfg["TrainingFraction"][trainingsetindex]
iteration = cfg["iteration"]
if cropping is not None:
cfg["cropping"] = True
cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"] = cropping
print("Overwriting cropping parameters:", cropping)
print("These are used for all videos, but won't be save to the cfg file.")
modelfolder = os.path.join(
cfg["project_path"],
str(
auxiliaryfunctions.get_model_folder(
trainFraction, shuffle, cfg, modelprefix=modelprefix
)
),
)
path_test_config = Path(modelfolder) / "test" / "pose_cfg.yaml"
try:
dlc_cfg = load_config(str(path_test_config))
except FileNotFoundError:
raise FileNotFoundError(
"It seems the model for iteration %s and shuffle %s and trainFraction %s does not exist."
% (iteration, shuffle, trainFraction)
)
# Check which snapshots are available and sort them by # iterations
try:
Snapshots = np.array(
[
fn.split(".")[0]
for fn in os.listdir(os.path.join(modelfolder, "train"))
if "index" in fn
]
)
except FileNotFoundError:
raise FileNotFoundError(
"Snapshots not found! It seems the dataset for shuffle %s has not been trained/does not exist.\n Be sure you also have the intended iteration number set.\n Please train it before using it to analyze videos.\n Use the function 'train_network' to train the network for shuffle %s."
% (shuffle, shuffle)
)
if cfg["snapshotindex"] == "all":
print(
"Snapshotindex is set to 'all' in the config.yaml file. Running video analysis with all snapshots is very costly! Use the function 'evaluate_network' to choose the best the snapshot. For now, changing snapshot index to -1!"
)
snapshotindex = -1
else:
snapshotindex = cfg["snapshotindex"]
increasing_indices = np.argsort([int(m.split("-")[1]) for m in Snapshots])
Snapshots = Snapshots[increasing_indices]
print("Using %s" % Snapshots[snapshotindex], "for model", modelfolder)
##################################################
# Load and setup CNN part detector
##################################################
# Check if data already was generated:
dlc_cfg["init_weights"] = os.path.join(
modelfolder, "train", Snapshots[snapshotindex]
)
trainingsiterations = (dlc_cfg["init_weights"].split(os.sep)[-1]).split("-")[-1]
# Update number of output and batchsize
dlc_cfg["num_outputs"] = cfg.get("num_outputs", dlc_cfg.get("num_outputs", 1))
    if batchsize is None:
# update batchsize (based on parameters in config.yaml)
dlc_cfg["batch_size"] = cfg["batch_size"]
else:
dlc_cfg["batch_size"] = batchsize
cfg["batch_size"] = batchsize
if "multi-animal" in dlc_cfg["dataset_type"]:
dynamic = (False, 0.5, 10) # setting dynamic mode to false
TFGPUinference = False
if dynamic[0]: # state=true
# (state,detectiontreshold,margin)=dynamic
print("Starting analysis in dynamic cropping mode with parameters:", dynamic)
dlc_cfg["num_outputs"] = 1
TFGPUinference = False
dlc_cfg["batch_size"] = 1
print(
"Switching batchsize to 1, num_outputs (per animal) to 1 and TFGPUinference to False (all these features are not supported in this mode)."
)
# Name for scorer:
DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
cfg,
shuffle,
trainFraction,
trainingsiterations=trainingsiterations,
modelprefix=modelprefix,
)
if dlc_cfg["num_outputs"] > 1:
if TFGPUinference:
print(
"Switching to numpy-based keypoint extraction code, as multiple point extraction is not supported by TF code currently."
)
TFGPUinference = False
print("Extracting ", dlc_cfg["num_outputs"], "instances per bodypart")
xyz_labs_orig = ["x", "y", "likelihood"]
suffix = [str(s + 1) for s in range(dlc_cfg["num_outputs"])]
suffix[0] = "" # first one has empty suffix for backwards compatibility
xyz_labs = [x + s for s in suffix for x in xyz_labs_orig]
else:
xyz_labs = ["x", "y", "likelihood"]
if use_openvino:
sess, inputs, outputs = predict.setup_openvino_pose_prediction(
dlc_cfg, device=use_openvino
)
elif TFGPUinference:
sess, inputs, outputs = predict.setup_GPUpose_prediction(
dlc_cfg, allow_growth=allow_growth
)
else:
sess, inputs, outputs = predict.setup_pose_prediction(
dlc_cfg, allow_growth=allow_growth
)
pdindex = pd.MultiIndex.from_product(
[[DLCscorer], dlc_cfg["all_joints_names"], xyz_labs],
names=["scorer", "bodyparts", "coords"],
)
##################################################
# Looping over videos
##################################################
Videos = auxiliaryfunctions.get_list_of_videos(videos, videotype)
if len(Videos) > 0:
if "multi-animal" in dlc_cfg["dataset_type"]:
from deeplabcut.pose_estimation_tensorflow.predict_multianimal import (
AnalyzeMultiAnimalVideo,
)
for video in Videos:
AnalyzeMultiAnimalVideo(
video,
DLCscorer,
trainFraction,
cfg,
dlc_cfg,
sess,
inputs,
outputs,
destfolder,
robust_nframes=robust_nframes,
use_shelve=use_shelve,
)
if auto_track: # tracker type is taken from default in cfg
convert_detections2tracklets(
config,
[video],
videotype,
shuffle,
trainingsetindex,
destfolder=destfolder,
modelprefix=modelprefix,
calibrate=calibrate,
identity_only=identity_only,
)
stitch_tracklets(
config,
[video],
videotype,
shuffle,
trainingsetindex,
destfolder=destfolder,
n_tracks=n_tracks,
modelprefix=modelprefix,
)
else:
for video in Videos:
DLCscorer = AnalyzeVideo(
video,
DLCscorer,
DLCscorerlegacy,
trainFraction,
cfg,
dlc_cfg,
sess,
inputs,
outputs,
pdindex,
save_as_csv,
destfolder,
TFGPUinference,
dynamic,
use_openvino,
)
os.chdir(str(start_path))
if "multi-animal" in dlc_cfg["dataset_type"]:
print(
"The videos are analyzed. Time to assemble animals and track 'em... \n Call 'create_video_with_all_detections' to check multi-animal detection quality before tracking."
)
print(
"If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract a few representative outlier frames."
)
else:
print(
"The videos are analyzed. Now your research can truly start! \n You can create labeled videos with 'create_labeled_video'"
)
print(
"If the tracking is not satisfactory for some videos, consider expanding the training set. You can use the function 'extract_outlier_frames' to extract a few representative outlier frames."
)
return DLCscorer # note: this is either DLCscorer or DLCscorerlegacy depending on what was used!
else:
print("No video(s) were found. Please check your paths and/or 'video_type'.")
return DLCscorer
|
16,205 |
def number_validator(value: Any) -> int | float:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, float):
return value
try:
value = int(value)
return cast(int, value)
except (TypeError, ValueError):
pass
try:
value = float(value)
return cast(float, value)
except (TypeError, ValueError) as err:
raise vol.Invalid(f"invalid number {value}") from err
|
def number_validator(value: Any) -> int | float:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, float):
return value
try:
value = int(value)
return cast(int, value)
except (TypeError, ValueError):
pass
try:
        return float(value)
except (TypeError, ValueError) as err:
raise vol.Invalid(f"invalid number {value}") from err
|
29,920 |
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_maxd=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test = True: Amount of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
map_maxd : float
maximum resolution [m] of spatial grid resolution
map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
cfg.PARAMS['dmax'] = map_maxd if map_maxd else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
# This is the complex one, just do the job an leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
|
def run_prepro_levels(rgi_version=None, rgi_reg=None, border=None,
output_folder='', working_dir='', dem_source='',
is_test=False, test_nr=4, demo=False, test_rgidf=None,
test_intersects_file=None, test_topofile=None,
test_crudir=None, disable_mp=False, timeout=0,
max_level=4, logging_level='WORKFLOW',
map_maxd=None, map_d1=None):
"""Does the actual job.
Parameters
----------
rgi_version : str
the RGI version to use (defaults to cfg.PARAMS)
rgi_reg : str
the RGI region to process
border : int
the number of pixels at the maps border
output_folder : str
path to the output folder (where to put the preprocessed tar files)
dem_source : str
which DEM source to use: default, SOURCE_NAME or ALL
working_dir : str
path to the OGGM working directory
is_test : bool
to test on a couple of glaciers only!
test_nr : int
if is_test is True: the number of glaciers to test
demo : bool
to run the prepro for the list of demo glaciers
test_rgidf : shapefile
for testing purposes only
test_intersects_file : shapefile
for testing purposes only
test_topofile : str
for testing purposes only
test_crudir : str
for testing purposes only
disable_mp : bool
disable multiprocessing
max_level : int
the maximum pre-processing level before stopping
logging_level : str
the logging level to use (DEBUG, INFO, WARNING, WORKFLOW)
map_maxd : float
maximum resolution [m] of the spatial grid
map_d1 : float
equation parameter which is used to calculate the grid resolution
"""
# TODO: temporarily silence Fiona deprecation warnings
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Input check
if max_level not in [1, 2, 3, 4]:
raise InvalidParamsError('max_level should be one of [1, 2, 3, 4]')
# Time
start = time.time()
def _time_log():
# Log util
m, s = divmod(time.time() - start, 60)
h, m = divmod(m, 60)
log.workflow('OGGM prepro_levels is done! Time needed: '
'{:02d}:{:02d}:{:02d}'.format(int(h), int(m), int(s)))
# Initialize OGGM and set up the run parameters
cfg.initialize(logging_level=logging_level)
# Local paths
utils.mkdir(working_dir)
cfg.PATHS['working_dir'] = working_dir
# Use multiprocessing?
cfg.PARAMS['use_multiprocessing'] = not disable_mp
# How many grid points around the glacier?
# Make it large if you expect your glaciers to grow large
cfg.PARAMS['border'] = border
# Size of the spatial map
cfg.PARAMS['dmax'] = map_maxd if map_maxd else cfg.PARAMS['dmax']
cfg.PARAMS['d1'] = map_d1 if map_d1 else cfg.PARAMS['d1']
# Set to True for operational runs
cfg.PARAMS['continue_on_error'] = True
# Timeout
cfg.PARAMS['task_timeout'] = timeout
# For statistics
climate_periods = [1920, 1960, 2000]
if rgi_version is None:
rgi_version = cfg.PARAMS['rgi_version']
rgi_dir_name = 'RGI{}'.format(rgi_version)
border_dir_name = 'b_{:03d}'.format(border)
base_dir = os.path.join(output_folder, rgi_dir_name, border_dir_name)
# Add a package version file
utils.mkdir(base_dir)
opath = os.path.join(base_dir, 'package_versions.txt')
with open(opath, 'w') as vfile:
vfile.write(utils.show_versions(logger=log))
if demo:
rgidf = utils.get_rgi_glacier_entities(cfg.DATA['demo_glaciers'].index)
elif test_rgidf is None:
# Get the RGI file
rgidf = gpd.read_file(utils.get_rgi_region_file(rgi_reg,
version=rgi_version))
# We use intersects
rgif = utils.get_rgi_intersects_region_file(rgi_reg,
version=rgi_version)
cfg.set_intersects_db(rgif)
else:
rgidf = test_rgidf
cfg.set_intersects_db(test_intersects_file)
if is_test:
# Just for fun
rgidf = rgidf.sample(test_nr)
# Sort for more efficient parallel computing
rgidf = rgidf.sort_values('Area', ascending=False)
log.workflow('Starting prepro run for RGI reg: {} '
'and border: {}'.format(rgi_reg, border))
log.workflow('Number of glaciers: {}'.format(len(rgidf)))
# Input
if test_topofile:
cfg.PATHS['dem_file'] = test_topofile
# L1 - initialize working directories
# Which DEM source?
if dem_source.upper() == 'ALL':
# This is the complex one, just do the job and leave
log.workflow('Running prepro on ALL sources')
for i, s in enumerate(utils.DEM_SOURCES):
rs = i == 0
rgidf['DEM_SOURCE'] = s
log.workflow('Running prepro on sources: {}'.format(s))
gdirs = []
for_task = []
for _, entity in rgidf.iterrows():
gdir = GlacierDirectory(entity, reset=rs)
for_task.append((gdir, dict(entity=entity)))
gdirs.append(gdir)
workflow.execute_entity_task(tasks.define_glacier_region, for_task)
workflow.execute_entity_task(_rename_dem_folder, gdirs, source=s)
# make a GeoTiff mask of the glacier, choose any source
workflow.execute_entity_task(gis.rasterio_glacier_mask,
gdirs, source='ALL')
# Compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
_time_log()
return
if dem_source:
# Force a given source
rgidf['DEM_SOURCE'] = dem_source.upper()
# L1 - go
gdirs = workflow.init_glacier_regions(rgidf, reset=True, force=True)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L1', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L1 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L1')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 1:
_time_log()
return
# L2 - Tasks
# Pre-download other files just in case
if test_crudir is None:
_ = utils.get_cru_file(var='tmp')
_ = utils.get_cru_file(var='pre')
else:
cfg.PATHS['cru_dir'] = test_crudir
workflow.execute_entity_task(tasks.process_cru_data, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L2', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# L2 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L2')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 2:
_time_log()
return
# L3 - Tasks
task_list = [
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.local_t_star,
tasks.mu_star_calibration,
tasks.prepare_for_inversion,
tasks.mass_conservation_inversion,
tasks.filter_inversion_output,
tasks.init_present_time_glacier
]
for task in task_list:
workflow.execute_entity_task(task, gdirs)
# Glacier stats
sum_dir = os.path.join(base_dir, 'L3', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
opath = os.path.join(sum_dir, 'climate_statistics_{}.csv'.format(rgi_reg))
utils.compile_climate_statistics(gdirs, add_climate_period=climate_periods,
path=opath)
# L3 OK - compress all in output directory
l_base_dir = os.path.join(base_dir, 'L3')
workflow.execute_entity_task(utils.gdir_to_tar, gdirs, delete=False,
base_dir=l_base_dir)
utils.base_dir_to_tar(l_base_dir)
if max_level == 3:
_time_log()
return
# L4 - No tasks: add some stats for consistency and make the dirs small
sum_dir = os.path.join(base_dir, 'L4', 'summary')
utils.mkdir(sum_dir)
opath = os.path.join(sum_dir, 'glacier_statistics_{}.csv'.format(rgi_reg))
utils.compile_glacier_statistics(gdirs, path=opath)
# Copy mini data to new dir
base_dir = os.path.join(base_dir, 'L4')
mini_gdirs = workflow.execute_entity_task(tasks.copy_to_basedir, gdirs,
base_dir=base_dir)
# L4 OK - compress all in output directory
workflow.execute_entity_task(utils.gdir_to_tar, mini_gdirs, delete=True)
utils.base_dir_to_tar(base_dir)
_time_log()
|
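For orientation, here is a minimal sketch of how this entry point might be invoked for a quick local test, using only the parameters documented in the docstring above; every value below (region code, border, paths) is an illustrative placeholder, not taken from the original.

# Hypothetical invocation of run_prepro_levels for a small test run.
# All argument values are placeholders chosen for illustration only.
run_prepro_levels(
    rgi_version='61',              # RGI version string (placeholder)
    rgi_reg='11',                  # RGI region to process (placeholder)
    border=80,                     # grid points around each glacier
    output_folder='/tmp/oggm_prepro_out',
    working_dir='/tmp/oggm_wd',
    is_test=True,                  # restrict the run to a small sample ...
    test_nr=2,                     # ... of two glaciers
    max_level=2,                   # stop after the L2 (climate) step
    logging_level='WORKFLOW',
)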
1,931 |
def _get_target_scores(X, estimator, response_method, pos_label=None):
"""Return target scores and positive label.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
response_method: {'auto', 'predict_proba', 'decision_function'}
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
pos_label : str or int, default=None
The class considered as the positive class when computing
the metrics. By default, `estimators.classes_[1]` is
considered as the positive class.
Returns
-------
y_pred: array, shape=(n_samples,)
Target scores calculated from the provided response_method
and pos_label.
pos_label: str or int
The class considered as the positive class when computing
the metrics.
"""
classification_error = (
"{} should be a binary classifier".format(estimator.__class__.__name__)
)
if not is_classifier(estimator):
raise ValueError(classification_error)
prediction_method = _check_classifier_response_method(
estimator, response_method)
y_pred = prediction_method(X)
if pos_label is not None and pos_label not in estimator.classes_:
raise ValueError(
f"The class provided by 'pos_label' is unknown. Got "
f"{pos_label} instead of one of {estimator.classes_}"
)
if y_pred.ndim != 1: # `predict_proba`
if y_pred.shape[1] != 2:
raise ValueError(classification_error)
if pos_label is None:
pos_label = estimator.classes_[1]
y_pred = y_pred[:, 1]
else:
class_idx = np.flatnonzero(estimator.classes_ == pos_label)
y_pred = y_pred[:, class_idx]
else:
if pos_label is None:
pos_label = estimator.classes_[1]
elif pos_label == estimator.classes_[0]:
y_pred *= -1
return y_pred, pos_label
|
def _get_target_scores(X, estimator, response_method, pos_label=None):
"""Return target scores and positive label.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input values.
estimator : estimator instance
Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline`
in which the last estimator is a classifier.
response_method: {'auto', 'predict_proba', 'decision_function'}
Specifies whether to use :term:`predict_proba` or
:term:`decision_function` as the target response. If set to 'auto',
:term:`predict_proba` is tried first and if it does not exist
:term:`decision_function` is tried next.
pos_label : str or int, default=None
The class considered as the positive class when computing
the metrics. By default, `estimators.classes_[1]` is
considered as the positive class.
Returns
-------
y_pred: ndarray of shape (n_samples,)
Target scores calculated from the provided response_method
and pos_label.
pos_label: str or int
The class considered as the positive class when computing
the metrics.
"""
classification_error = (
"{} should be a binary classifier".format(estimator.__class__.__name__)
)
if not is_classifier(estimator):
raise ValueError(classification_error)
prediction_method = _check_classifier_response_method(
estimator, response_method)
y_pred = prediction_method(X)
if pos_label is not None and pos_label not in estimator.classes_:
raise ValueError(
f"The class provided by 'pos_label' is unknown. Got "
f"{pos_label} instead of one of {estimator.classes_}"
)
if y_pred.ndim != 1: # `predict_proba`
if y_pred.shape[1] != 2:
raise ValueError(classification_error)
if pos_label is None:
pos_label = estimator.classes_[1]
y_pred = y_pred[:, 1]
else:
class_idx = np.flatnonzero(estimator.classes_ == pos_label)
y_pred = y_pred[:, class_idx]
else:
if pos_label is None:
pos_label = estimator.classes_[1]
elif pos_label == estimator.classes_[0]:
y_pred *= -1
return y_pred, pos_label
|
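The column-selection and sign-flip logic above can be illustrated in isolation with plain NumPy; the arrays below are invented for the example, and the snippet deliberately does not call the private helper or any scikit-learn internals.

import numpy as np

# estimator.classes_ and a predict_proba-style output, made up for illustration.
classes = np.array(['neg', 'pos'])
proba = np.array([[0.9, 0.1],
                  [0.2, 0.8]])            # shape (n_samples, 2)

# pos_label defaults to classes[1]; its probability column is the score.
pos_label = classes[1]
col = np.flatnonzero(classes == pos_label)[0]
scores_proba = proba[:, col]              # array([0.1, 0.8])

# For 1-D decision_function scores, choosing the *first* class as positive
# simply flips the sign, exactly as in the final `y_pred *= -1` branch.
decision = np.array([-1.5, 2.0])
scores_first_class = -decision            # array([ 1.5, -2. ])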
40,378 |
def mask_to_index(mask: Tensor) -> Tensor:
r"""Converts mask to indicies representation
Args:
mask (Tensor): The mask.
"""
return mask.nonzero(as_tuple=False).view(-1)
|
def mask_to_index(mask: Tensor) -> Tensor:
r"""Converts mask to indices representation
Args:
mask (Tensor): The mask.
"""
return mask.nonzero(as_tuple=False).view(-1)
|
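A quick usage example of the helper above, assuming only that PyTorch is installed:

import torch

mask = torch.tensor([True, False, True, False, True])
idx = mask.nonzero(as_tuple=False).view(-1)   # same expression the helper uses
print(idx)   # tensor([0, 2, 4])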
24,861 |
def my_func(self, doc_type):
"""This is a docstring.
Arguments
---------
doc_type : str
Numpy
"""
return
|
def my_func(self, doc_type):
"""ignores_numpy_return_none
Arguments
---------
doc_type : str
Numpy
"""
return
|
40,750 |
def test_parallel_error():
with pytest.raises(ValueError, match=r"Unknown backend 'abc'. Available backends:"):
idist.Parallel(backend="abc")
with pytest.raises(ValueError, match=r"If backend is None, argument 'nnodes' should be also None"):
idist.Parallel(nnodes=2)
with pytest.raises(ValueError, match=r"Argument nproc_per_node should positive"):
idist.Parallel(backend="gloo", nproc_per_node=-1)
with pytest.raises(ValueError, match=r"Argument nnodes should positive"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=-1)
with pytest.raises(ValueError, match=r"If number of nodes larger than one"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2)
with pytest.raises(ValueError, match=r"Argument node_rank should be between 0 and"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=2)
with pytest.raises(ValueError, match=r"If number of nodes larger than one, arguments master_addr and master_port"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=1)
|
def test_parallel_wrong_inputs():
with pytest.raises(ValueError, match=r"Unknown backend 'abc'. Available backends:"):
idist.Parallel(backend="abc")
with pytest.raises(ValueError, match=r"If backend is None, argument 'nnodes' should be also None"):
idist.Parallel(nnodes=2)
with pytest.raises(ValueError, match=r"Argument nproc_per_node should positive"):
idist.Parallel(backend="gloo", nproc_per_node=-1)
with pytest.raises(ValueError, match=r"Argument nnodes should positive"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=-1)
with pytest.raises(ValueError, match=r"If number of nodes larger than one"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2)
with pytest.raises(ValueError, match=r"Argument node_rank should be between 0 and"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=2)
with pytest.raises(ValueError, match=r"If number of nodes larger than one, arguments master_addr and master_port"):
idist.Parallel(backend="gloo", nproc_per_node=1, nnodes=2, node_rank=1)
|
17,154 |
def _condition_based_services(
vehicle: MyBMWVehicle, unit_system: UnitSystem
) -> dict[str, Any]:
extra_attributes = {}
for report in vehicle.condition_based_services.messages:
if (
report.service_type not in ALLOWED_CONDITION_BASED_SERVICE_KEYS
and report.service_type not in LOGGED_CONDITION_BASED_SERVICE_WARINGS
):
_LOGGER.warning(
"'%s' not an allowed condition based service (%s)",
report.service_type,
report,
)
LOGGED_CONDITION_BASED_SERVICE_WARINGS.add(report.service_type)
continue
extra_attributes.update(_format_cbs_report(report, unit_system))
return extra_attributes
|
def _condition_based_services(
vehicle: MyBMWVehicle, unit_system: UnitSystem
) -> dict[str, Any]:
extra_attributes = {}
for report in vehicle.condition_based_services.messages:
if (
report.service_type not in ALLOWED_CONDITION_BASED_SERVICE_KEYS
and report.service_type not in LOGGED_CONDITION_BASED_SERVICE_WARNINGS
):
_LOGGER.warning(
"'%s' not an allowed condition based service (%s)",
report.service_type,
report,
)
LOGGED_CONDITION_BASED_SERVICE_WARNINGS.add(report.service_type)
continue
extra_attributes.update(_format_cbs_report(report, unit_system))
return extra_attributes
|
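The membership test against the module-level set implements a "warn once per service type" pattern. A standalone sketch of that pattern, with invented names and independent of the integration code above, might look like this:

import logging

_LOGGER = logging.getLogger(__name__)
_ALREADY_WARNED = set()      # module-level, so each key warns only once


def warn_once(key, message):
    """Log `message` the first time `key` is seen, then stay silent for it."""
    if key in _ALREADY_WARNED:
        return
    _LOGGER.warning(message)
    _ALREADY_WARNED.add(key)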
40,577 |
def load_timeseries_opsd(years=None, fn=None, countries=None, source="ENTSOE_power_statistics"):
"""
Read load data from OPSD time-series package version 2019-06-05.
Parameters
----------
years : None or slice()
Years for which to read load data (defaults to
slice("2018","2019"))
fn : file name
countries : Countries for which to read load data.
source : "ENTSOE_transparency" or "ENTSOE_power_statistics"
Returns
-------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
"""
if countries is None:
countries = snakemake.config['countries']
if source == 'ENTSOE_transparency':
load = (pd.read_csv(fn, index_col=0, parse_dates=True)
.loc[:, lambda df: df.columns.to_series().str.endswith('_load_actual_entsoe_transparency')]
.rename(columns=lambda s: s[:-len('_load_actual_entsoe_transparency')])
.dropna(how="all", axis=0))
elif source == 'ENTSOE_power_statistics':
load = (pd.read_csv(fn, index_col=0, parse_dates=True)
.loc[:, lambda df: df.columns.to_series().str.endswith('_load_actual_entsoe_power_statistics')]
.rename(columns=lambda s: s[:-len('_load_actual_entsoe_power_statistics')])
.dropna(how="all", axis=0))
else:
logger.warning("Please proviede correct source name for load data")
if 'GB_UKM' in load.columns:
load.rename(columns={'GB_UKM' : 'GB'}, inplace=True)
load = load.filter(items=countries)
if years is not None:
load = load.loc[years]
return load
|
def load_timeseries_opsd(years=None, fn=None, countries=None, source="ENTSOE_power_statistics"):
"""
Read load data from OPSD time-series package version 2019-06-05.
Parameters
----------
years : None or slice()
Years for which to read load data (defaults to
slice("2018","2019"))
fn : file name
countries : Countries for which to read load data.
source : "ENTSOE_transparency" or "ENTSOE_power_statistics"
Returns
-------
load : pd.DataFrame
Load time-series with UTC timestamps x ISO-2 countries
"""
if countries is None:
countries = snakemake.config['countries']
if source == 'ENTSOE_transparency':
load = (pd.read_csv(fn, index_col=0, parse_dates=True)
.filter(like='_load_actual_entsoe_transparency')
.rename(columns=lambda s: s[:-len('_load_actual_entsoe_transparency')])
.dropna(how="all", axis=0))
elif source == 'ENTSOE_power_statistics':
load = (pd.read_csv(fn, index_col=0, parse_dates=True)
.loc[:, lambda df: df.columns.to_series().str.endswith('_load_actual_entsoe_power_statistics')]
.rename(columns=lambda s: s[:-len('_load_actual_entsoe_power_statistics')])
.dropna(how="all", axis=0))
else:
logger.warning("Please proviede correct source name for load data")
if 'GB_UKM' in load.columns:
load.rename(columns={'GB_UKM' : 'GB'}, inplace=True)
load = load.filter(items=countries)
if years is not None:
load = load.loc[years]
return load
|
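The suffix-based column selection and the suffix-stripping rename can be checked on a tiny synthetic frame; the column names below are invented but follow the OPSD naming pattern used above.

import pandas as pd

suffix = '_load_actual_entsoe_transparency'
df = pd.DataFrame({
    'DE' + suffix: [41000.0, 42000.0],
    'FR' + suffix: [55000.0, 54000.0],
    'DE_load_forecast': [40000.0, 41500.0],      # unrelated column, dropped below
})

load = (df.filter(like=suffix)                          # keep matching columns only
          .rename(columns=lambda s: s[:-len(suffix)]))  # strip the suffix
print(list(load.columns))   # ['DE', 'FR']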
4,772 |
def ex1():
plt.figure(1)
ax = plt.axes([0, 0, 1, 1])
#ax = plt.subplot(111)
ax.set_yticks([0.5])
ax.set_yticklabels(["very long label"])
make_axes_area_auto_adjustable(ax)
|
def ex1():
plt.figure()
ax = plt.axes([0, 0, 1, 1])
#ax = plt.subplot(111)
ax.set_yticks([0.5])
ax.set_yticklabels(["very long label"])
make_axes_area_auto_adjustable(ax)
|
40,059 |
def main():
parser = argparse.ArgumentParser(
description="Security analysis of Ethereum smart contracts"
)
parser.add_argument("solidity_file", nargs="*")
commands = parser.add_argument_group("commands")
commands.add_argument("-g", "--graph", help="generate a control flow graph")
commands.add_argument(
"-V",
"--version",
action="store_true",
help="print the Mythril version number and exit",
)
commands.add_argument(
"-x",
"--fire-lasers",
action="store_true",
help="detect vulnerabilities, use with -c, -a or solidity file(s)",
)
commands.add_argument(
"--truffle",
action="store_true",
help="analyze a truffle project (run from project dir)",
)
commands.add_argument(
"-d", "--disassemble", action="store_true", help="print disassembly"
)
commands.add_argument(
"-j",
"--statespace-json",
help="dumps the statespace json",
metavar="OUTPUT_FILE",
)
inputs = parser.add_argument_group("input arguments")
inputs.add_argument(
"-c",
"--code",
help='hex-encoded bytecode string ("6060604052...")',
metavar="BYTECODE",
)
inputs.add_argument(
"-f",
"--codefile",
help="file containing hex-encoded bytecode string",
metavar="BYTECODEFILE",
type=argparse.FileType("r"),
)
inputs.add_argument(
"-a",
"--address",
help="pull contract from the blockchain",
metavar="CONTRACT_ADDRESS",
)
inputs.add_argument(
"-l",
"--dynld",
action="store_true",
help="auto-load dependencies from the blockchain",
)
inputs.add_argument(
"--no-onchain-storage-access",
action="store_true",
help="turns off getting the data from onchain contracts",
)
inputs.add_argument(
"--bin-runtime",
action="store_true",
help="Only when -c or -f is used. Consider the input bytecode as binary runtime code, default being the contract creation bytecode.",
)
outputs = parser.add_argument_group("output formats")
outputs.add_argument(
"-o",
"--outform",
choices=["text", "markdown", "json"],
default="text",
help="report output format",
metavar="<text/markdown/json>",
)
outputs.add_argument(
"--verbose-report",
action="store_true",
help="Include debugging information in report",
)
database = parser.add_argument_group("local contracts database")
database.add_argument(
"-s", "--search", help="search the contract database", metavar="EXPRESSION"
)
database.add_argument(
"--leveldb-dir",
help="specify leveldb directory for search or direct access operations",
metavar="LEVELDB_PATH",
)
utilities = parser.add_argument_group("utilities")
utilities.add_argument(
"--hash", help="calculate function signature hash", metavar="SIGNATURE"
)
utilities.add_argument(
"--storage",
help="read state variables from storage index, use with -a",
metavar="INDEX,NUM_SLOTS,[array] / mapping,INDEX,[KEY1, KEY2...]",
)
utilities.add_argument(
"--solv",
help="specify solidity compiler version. If not present, will try to install it (Experimental)",
metavar="SOLV",
)
utilities.add_argument(
"--contract-hash-to-address",
help="returns corresponding address for a contract address hash",
metavar="SHA3_TO_LOOK_FOR",
)
options = parser.add_argument_group("options")
options.add_argument(
"-m",
"--modules",
help="Comma-separated list of security analysis modules",
metavar="MODULES",
)
options.add_argument(
"--max-depth",
type=int,
default=22,
help="Maximum recursion depth for symbolic execution",
)
options.add_argument(
"--strategy",
choices=["dfs", "bfs", "naive-random", "weighted-random"],
default="dfs",
help="Symbolic execution strategy",
)
options.add_argument(
"-t",
"--transaction-count",
type=int,
default=2,
help="Maximum number of transactions issued by laser",
)
options.add_argument(
"--execution-timeout",
type=int,
default=600,
help="The amount of seconds to spend on symbolic execution",
)
options.add_argument(
"--create-timeout",
type=int,
default=10,
help="The amount of seconds to spend on " "the initial contract creation",
)
options.add_argument("--solc-args", help="Extra arguments for solc")
options.add_argument(
"--phrack", action="store_true", help="Phrack-style call graph"
)
options.add_argument(
"--enable-physics", action="store_true", help="enable graph physics simulation"
)
options.add_argument("-v", type=int, help="log level (0-2)", metavar="LOG_LEVEL")
options.add_argument(
"-q",
"--query-signature",
action="store_true",
help="Lookup function signatures through www.4byte.directory",
)
rpc = parser.add_argument_group("RPC options")
rpc.add_argument(
"--rpc",
help="custom RPC settings",
metavar="HOST:PORT / ganache / infura-[network_name]",
default="infura-mainnet",
)
rpc.add_argument(
"--rpctls", type=bool, default=False, help="RPC connection over TLS"
)
parser.add_argument("--epic", action="store_true", help=argparse.SUPPRESS)
# Get config values
args = parser.parse_args()
if args.epic:
path = os.path.dirname(os.path.realpath(__file__))
sys.argv.remove("--epic")
os.system(" ".join(sys.argv) + " | python3 " + path + "/epic.py")
sys.exit()
if args.version:
if args.outform == "json":
print(json.dumps({"version_str": VERSION}))
else:
print("Mythril version {}".format(VERSION))
sys.exit()
# Parse cmdline args
if not (
args.search
or args.hash
or args.disassemble
or args.graph
or args.fire_lasers
or args.storage
or args.truffle
or args.statespace_json
or args.contract_hash_to_address
):
parser.print_help()
sys.exit()
if args.v:
if 0 <= args.v < 3:
coloredlogs.install(
fmt="%(name)s[%(process)d] %(levelname)s %(message)s",
level=[logging.NOTSET, logging.INFO, logging.DEBUG][args.v],
)
else:
exit_with_error(
args.outform, "Invalid -v value, you can find valid values in usage"
)
if args.query_signature:
if sigs.ethereum_input_decoder == None:
exit_with_error(
args.outform,
"The --query-signature function requires the python package ethereum-input-decoder",
)
# -- commands --
if args.hash:
print(Mythril.hash_for_function_signature(args.hash))
sys.exit()
try:
# the mythril object should be our main interface
# infura = None, rpc = None, rpctls = None
# solc_args = None, dynld = None, max_recursion_depth = 12):
mythril = Mythril(
solv=args.solv,
dynld=args.dynld,
onchain_storage_access=(not args.no_onchain_storage_access),
solc_args=args.solc_args,
enable_online_lookup=args.query_signature,
)
if (
args.dynld
or not args.no_onchain_storage_access
and not (args.rpc or args.i)
):
mythril.set_api_from_config_path()
if args.address:
# Establish RPC connection if necessary
mythril.set_api_rpc(rpc=args.rpc, rpctls=args.rpctls)
elif args.search or args.contract_hash_to_address:
# Open LevelDB if necessary
mythril.set_api_leveldb(
mythril.leveldb_dir if not args.leveldb_dir else args.leveldb_dir
)
if args.search:
# Database search ops
mythril.search_db(args.search)
sys.exit()
if args.contract_hash_to_address:
# search corresponding address
try:
mythril.contract_hash_to_address(args.contract_hash_to_address)
except AddressNotFoundError:
print("Address not found.")
sys.exit()
if args.truffle:
try:
# not really pythonic atm. needs refactoring
mythril.analyze_truffle_project(args)
except FileNotFoundError:
print(
"Build directory not found. Make sure that you start the analysis from the project root, and that 'truffle compile' has executed successfully."
)
sys.exit()
# Load / compile input contracts
address = None
code = None
if args.code:
# Load from bytecode
code = args.code[2:] if args.code.startswith("0x") else args.code
address, _ = mythril.load_from_bytecode(code, args.bin_runtime)
elif args.codefile:
bytecode = "".join([l.strip() for l in args.codefile if len(l.strip()) > 0])
bytecode = bytecode[2:] if bytecode.startswith("0x") else bytecode
address, _ = mythril.load_from_bytecode(bytecode, args.bin_runtime)
elif args.address:
# Get bytecode from a contract address
address, _ = mythril.load_from_address(args.address)
elif args.solidity_file:
# Compile Solidity source file(s)
if args.graph and len(args.solidity_file) > 1:
exit_with_error(
args.outform,
"Cannot generate call graphs from multiple input files. Please do it one at a time.",
)
address, _ = mythril.load_from_solidity(args.solidity_file) # list of files
else:
exit_with_error(
args.outform,
"No input bytecode. Please provide EVM code via -c BYTECODE, -a ADDRESS, or -i SOLIDITY_FILES",
)
# Commands
if args.storage:
if not args.address:
exit_with_error(
args.outform,
"To read storage, provide the address of a deployed contract with the -a option.",
)
storage = mythril.get_state_variable_from_storage(
address=address,
params=[a.strip() for a in args.storage.strip().split(",")],
)
print(storage)
elif args.disassemble:
output = ""
easm_text = mythril.contracts[0].get_easm()
# or mythril.disassemble(mythril.contracts[0])
if easm_text:
output += "Runtime Disassembly: \n" + easm_text
if code:
output += "Disassembly: \n" + mythril.contracts[0].get_creation_easm()
sys.stdout.write(output)
elif args.graph or args.fire_lasers:
if not mythril.contracts:
exit_with_error(
args.outform, "input files do not contain any valid contracts"
)
if args.graph:
html = mythril.graph_html(
strategy=args.strategy,
contract=mythril.contracts[0],
address=address,
enable_physics=args.enable_physics,
phrackify=args.phrack,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
)
try:
with open(args.graph, "w") as f:
f.write(html)
except Exception as e:
exit_with_error(args.outform, "Error saving graph: " + str(e))
else:
try:
report = mythril.fire_lasers(
strategy=args.strategy,
address=address,
modules=[m.strip() for m in args.modules.strip().split(",")]
if args.modules
else [],
verbose_report=args.verbose_report,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
transaction_count=args.transaction_count,
)
outputs = {
"json": report.as_json(),
"text": report.as_text(),
"markdown": report.as_markdown(),
}
print(outputs[args.outform])
except ModuleNotFoundError as e:
exit_with_error(
args.outform, "Error loading analyis modules: " + format(e)
)
elif args.statespace_json:
if not mythril.contracts:
exit_with_error(
args.outform, "input files do not contain any valid contracts"
)
statespace = mythril.dump_statespace(
strategy=args.strategy,
contract=mythril.contracts[0],
address=address,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
)
try:
with open(args.statespace_json, "w") as f:
json.dump(statespace, f)
except Exception as e:
exit_with_error(args.outform, "Error saving json: " + str(e))
else:
parser.print_help()
except CriticalError as ce:
exit_with_error(args.outform, str(ce))
|
def main():
parser = argparse.ArgumentParser(
description="Security analysis of Ethereum smart contracts"
)
parser.add_argument("solidity_file", nargs="*")
commands = parser.add_argument_group("commands")
commands.add_argument("-g", "--graph", help="generate a control flow graph")
commands.add_argument(
"-V",
"--version",
action="store_true",
help="print the Mythril version number and exit",
)
commands.add_argument(
"-x",
"--fire-lasers",
action="store_true",
help="detect vulnerabilities, use with -c, -a or solidity file(s)",
)
commands.add_argument(
"--truffle",
action="store_true",
help="analyze a truffle project (run from project dir)",
)
commands.add_argument(
"-d", "--disassemble", action="store_true", help="print disassembly"
)
commands.add_argument(
"-j",
"--statespace-json",
help="dumps the statespace json",
metavar="OUTPUT_FILE",
)
inputs = parser.add_argument_group("input arguments")
inputs.add_argument(
"-c",
"--code",
help='hex-encoded bytecode string ("6060604052...")',
metavar="BYTECODE",
)
inputs.add_argument(
"-f",
"--codefile",
help="file containing hex-encoded bytecode string",
metavar="BYTECODEFILE",
type=argparse.FileType("r"),
)
inputs.add_argument(
"-a",
"--address",
help="pull contract from the blockchain",
metavar="CONTRACT_ADDRESS",
)
inputs.add_argument(
"-l",
"--dynld",
action="store_true",
help="auto-load dependencies from the blockchain",
)
inputs.add_argument(
"--no-onchain-storage-access",
action="store_true",
help="turns off getting the data from onchain contracts",
)
inputs.add_argument(
"--bin-runtime",
action="store_true",
help="Only when -c or -f is used. Consider the input bytecode as binary runtime code, default being the contract creation bytecode.",
)
outputs = parser.add_argument_group("output formats")
outputs.add_argument(
"-o",
"--outform",
choices=["text", "markdown", "json"],
default="text",
help="report output format",
metavar="<text/markdown/json>",
)
outputs.add_argument(
"--verbose-report",
action="store_true",
help="Include debugging information in report",
)
database = parser.add_argument_group("local contracts database")
database.add_argument(
"-s", "--search", help="search the contract database", metavar="EXPRESSION"
)
database.add_argument(
"--leveldb-dir",
help="specify leveldb directory for search or direct access operations",
metavar="LEVELDB_PATH",
)
utilities = parser.add_argument_group("utilities")
utilities.add_argument(
"--hash", help="calculate function signature hash", metavar="SIGNATURE"
)
utilities.add_argument(
"--storage",
help="read state variables from storage index, use with -a",
metavar="INDEX,NUM_SLOTS,[array] / mapping,INDEX,[KEY1, KEY2...]",
)
utilities.add_argument(
"--solv",
help="specify solidity compiler version. If not present, will try to install it (Experimental)",
metavar="SOLV",
)
utilities.add_argument(
"--contract-hash-to-address",
help="returns corresponding address for a contract address hash",
metavar="SHA3_TO_LOOK_FOR",
)
options = parser.add_argument_group("options")
options.add_argument(
"-m",
"--modules",
help="Comma-separated list of security analysis modules",
metavar="MODULES",
)
options.add_argument(
"--max-depth",
type=int,
default=22,
help="Maximum recursion depth for symbolic execution",
)
options.add_argument(
"--strategy",
choices=["dfs", "bfs", "naive-random", "weighted-random"],
default="dfs",
help="Symbolic execution strategy",
)
options.add_argument(
"-t",
"--transaction-count",
type=int,
default=2,
help="Maximum number of transactions issued by laser",
)
options.add_argument(
"--execution-timeout",
type=int,
default=600,
help="The amount of seconds to spend on symbolic execution",
)
options.add_argument(
"--create-timeout",
type=int,
default=10,
help="The amount of seconds to spend on " "the initial contract creation",
)
options.add_argument("--solc-args", help="Extra arguments for solc")
options.add_argument(
"--phrack", action="store_true", help="Phrack-style call graph"
)
options.add_argument(
"--enable-physics", action="store_true", help="enable graph physics simulation"
)
options.add_argument("-v", type=int, help="log level (0-2)", metavar="LOG_LEVEL")
options.add_argument(
"-q",
"--query-signature",
action="store_true",
help="Lookup function signatures through www.4byte.directory",
)
rpc = parser.add_argument_group("RPC options")
rpc.add_argument(
"--rpc",
help="custom RPC settings",
metavar="HOST:PORT / ganache / infura-[network_name]",
default="infura-mainnet",
)
rpc.add_argument(
"--rpctls", type=bool, default=False, help="RPC connection over TLS"
)
parser.add_argument("--epic", action="store_true", help=argparse.SUPPRESS)
# Get config values
args = parser.parse_args()
if args.epic:
path = os.path.dirname(os.path.realpath(__file__))
sys.argv.remove("--epic")
os.system(" ".join(sys.argv) + " | python3 " + path + "/epic.py")
sys.exit()
if args.version:
if args.outform == "json":
print(json.dumps({"version_str": VERSION}))
else:
print("Mythril version {}".format(VERSION))
sys.exit()
# Parse cmdline args
if not (
args.search
or args.hash
or args.disassemble
or args.graph
or args.fire_lasers
or args.storage
or args.truffle
or args.statespace_json
or args.contract_hash_to_address
):
parser.print_help()
sys.exit()
if args.v:
if 0 <= args.v < 3:
coloredlogs.install(
fmt="%(name)s[%(process)d] %(levelname)s %(message)s",
level=[logging.NOTSET, logging.INFO, logging.DEBUG][args.v],
)
else:
exit_with_error(
args.outform, "Invalid -v value, you can find valid values in usage"
)
if args.query_signature:
if sigs.ethereum_input_decoder == None:
exit_with_error(
args.outform,
"The --query-signature function requires the python package ethereum-input-decoder",
)
# -- commands --
if args.hash:
print(Mythril.hash_for_function_signature(args.hash))
sys.exit()
try:
# the mythril object should be our main interface
# infura = None, rpc = None, rpctls = None
# solc_args = None, dynld = None, max_recursion_depth = 12):
mythril = Mythril(
solv=args.solv,
dynld=args.dynld,
onchain_storage_access=(not args.no_onchain_storage_access),
solc_args=args.solc_args,
enable_online_lookup=args.query_signature,
)
if (
args.dynld
or not args.no_onchain_storage_access
and not (args.rpc or args.i)
):
mythril.set_api_from_config_path()
if args.address:
# Establish RPC connection if necessary
mythril.set_api_rpc(rpc=args.rpc, rpctls=args.rpctls)
elif args.search or args.contract_hash_to_address:
# Open LevelDB if necessary
mythril.set_api_leveldb(
mythril.leveldb_dir if not args.leveldb_dir else args.leveldb_dir
)
if args.search:
# Database search ops
mythril.search_db(args.search)
sys.exit()
if args.contract_hash_to_address:
# search corresponding address
try:
mythril.contract_hash_to_address(args.contract_hash_to_address)
except AddressNotFoundError:
print("Address not found.")
sys.exit()
if args.truffle:
try:
# not really pythonic atm. needs refactoring
mythril.analyze_truffle_project(args)
except FileNotFoundError:
print(
"Build directory not found. Make sure that you start the analysis from the project root, and that 'truffle compile' has executed successfully."
)
sys.exit()
# Load / compile input contracts
address = None
code = None
if args.code:
# Load from bytecode
code = args.code[2:] if args.code.startswith("0x") else args.code
address, _ = mythril.load_from_bytecode(code, args.bin_runtime)
elif args.codefile:
bytecode = "".join([l.strip() for l in args.codefile if len(l.strip()) > 0])
bytecode = bytecode[2:] if bytecode.startswith("0x") else bytecode
address, _ = mythril.load_from_bytecode(bytecode, args.bin_runtime)
elif args.address:
# Get bytecode from a contract address
address, _ = mythril.load_from_address(args.address)
elif args.solidity_file:
# Compile Solidity source file(s)
if args.graph and len(args.solidity_file) > 1:
exit_with_error(
args.outform,
"Cannot generate call graphs from multiple input files. Please do it one at a time.",
)
address, _ = mythril.load_from_solidity(args.solidity_file) # list of files
else:
exit_with_error(
args.outform,
"No input bytecode. Please provide EVM code via -c BYTECODE, -a ADDRESS, or -i SOLIDITY_FILES",
)
# Commands
if args.storage:
if not args.address:
exit_with_error(
args.outform,
"To read storage, provide the address of a deployed contract with the -a option.",
)
storage = mythril.get_state_variable_from_storage(
address=address,
params=[a.strip() for a in args.storage.strip().split(",")],
)
print(storage)
elif args.disassemble:
output = ""
easm_text = mythril.contracts[0].get_easm()
# or mythril.disassemble(mythril.contracts[0])
if easm_text:
output += "Runtime Disassembly: \n" + easm_text
if mythril.contracts[0].creation_code:
output += "Disassembly: \n" + mythril.contracts[0].get_creation_easm()
sys.stdout.write(output)
elif args.graph or args.fire_lasers:
if not mythril.contracts:
exit_with_error(
args.outform, "input files do not contain any valid contracts"
)
if args.graph:
html = mythril.graph_html(
strategy=args.strategy,
contract=mythril.contracts[0],
address=address,
enable_physics=args.enable_physics,
phrackify=args.phrack,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
)
try:
with open(args.graph, "w") as f:
f.write(html)
except Exception as e:
exit_with_error(args.outform, "Error saving graph: " + str(e))
else:
try:
report = mythril.fire_lasers(
strategy=args.strategy,
address=address,
modules=[m.strip() for m in args.modules.strip().split(",")]
if args.modules
else [],
verbose_report=args.verbose_report,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
transaction_count=args.transaction_count,
)
outputs = {
"json": report.as_json(),
"text": report.as_text(),
"markdown": report.as_markdown(),
}
print(outputs[args.outform])
except ModuleNotFoundError as e:
exit_with_error(
args.outform, "Error loading analyis modules: " + format(e)
)
elif args.statespace_json:
if not mythril.contracts:
exit_with_error(
args.outform, "input files do not contain any valid contracts"
)
statespace = mythril.dump_statespace(
strategy=args.strategy,
contract=mythril.contracts[0],
address=address,
max_depth=args.max_depth,
execution_timeout=args.execution_timeout,
create_timeout=args.create_timeout,
)
try:
with open(args.statespace_json, "w") as f:
json.dump(statespace, f)
except Exception as e:
exit_with_error(args.outform, "Error saving json: " + str(e))
else:
parser.print_help()
except CriticalError as ce:
exit_with_error(args.outform, str(ce))
|
32,537 |
def main(): # pragma: no cover
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = params.get('timeout') or 1
retries = params.get('retries') or 5
aws_gd_severity = params.get('gd_severity', '')
first_fetch = arg_to_datetime(params.get('first_fetch'))
limit = arg_to_number(params.get('limit'))
try:
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
# proxy is being handled in AWSClient.
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate,
timeout, retries)
client = aws_client.aws_session(service=CLIENT_SERVICE, region=aws_default_region)
command = demisto.command()
if command == 'test-module':
get_events(aws_client=client,
collect_from={},
collect_from_default=first_fetch,
last_ids={},
severity=aws_gd_severity,
limit=1,
detectors_num=1)
return_results('ok')
if command in ('aws-gd-get-events', 'fetch-events'):
events: list = []
if command == 'aws-gd-get-events':
collect_from = arg_to_datetime(demisto.args().get('collect_from', params.get('first_fetch')))
severity = demisto.args().get('severity', aws_gd_severity)
command_limit = arg_to_number(demisto.args().get('limit', limit))
events, new_last_ids, new_collect_from = get_events(
aws_client=client,
collect_from={},
collect_from_default=collect_from,
last_ids={},
severity=severity,
limit=command_limit if command_limit else MAX_RESULTS)
command_results = CommandResults(
readable_output=tableToMarkdown('AWSGuardDuty Logs', events, headerTransform=pascalToSpace),
outputs_prefix='AWSGuardDuty.Logs',
outputs_key_field='event.id',
outputs=events,
raw_response=events,
)
return_results(command_results)
if command == 'fetch-events':
last_run = demisto.getLastRun()
collect_from_dict = last_run.get('collect_from', {})
last_ids = last_run.get('last_ids', {})
events, new_last_ids, new_collect_from_dict = get_events(aws_client=client,
collect_from=collect_from_dict,
collect_from_default=first_fetch,
last_ids=last_ids,
severity=aws_gd_severity,
limit=limit if limit else MAX_RESULTS)
demisto.setLastRun({
'collect_from': new_collect_from_dict,
'last_ids': new_last_ids
})
if argToBoolean(demisto.args().get('should_push_events', 'true')):
send_events_to_xsiam(events, params.get('vendor', 'AWS'), params.get('product', 'GuardDuty'))
elif command != 'test-module':
raise NotImplementedError(f"Command {command} is not implemented.")
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command in AWSGuardDutyEventCollector.\nError:\n{str(e)}'
f'\nTraceback:\n{traceback.format_exc()}')
|
def main(): # pragma: no cover
params = demisto.params()
aws_default_region = params.get('defaultRegion')
aws_role_arn = params.get('roleArn')
aws_role_session_name = params.get('roleSessionName')
aws_role_session_duration = params.get('sessionDuration')
aws_role_policy = None
aws_access_key_id = params.get('access_key')
aws_secret_access_key = params.get('secret_key')
verify_certificate = not params.get('insecure', True)
timeout = params.get('timeout') or 1
retries = params.get('retries') or 5
aws_gd_severity = params.get('gd_severity', '')
first_fetch = arg_to_datetime(params.get('first_fetch'))
limit = arg_to_number(params.get('limit'))
try:
validate_params(aws_default_region, aws_role_arn, aws_role_session_name, aws_access_key_id,
aws_secret_access_key)
# proxy is being handled in AWSClient.
aws_client = AWSClient(aws_default_region, aws_role_arn, aws_role_session_name, aws_role_session_duration,
aws_role_policy, aws_access_key_id, aws_secret_access_key, verify_certificate,
timeout, retries)
client = aws_client.aws_session(service=CLIENT_SERVICE, region=aws_default_region)
command = demisto.command()
if command == 'test-module':
get_events(aws_client=client,
collect_from={},
collect_from_default=first_fetch,
last_ids={},
severity=aws_gd_severity,
limit=1,
detectors_num=1)
return_results('ok')
if command in ('aws-gd-get-events', 'fetch-events'):
events: list = []
if command == 'aws-gd-get-events':
collect_from = arg_to_datetime(demisto.args().get('collect_from', params.get('first_fetch')))
severity = demisto.args().get('severity', aws_gd_severity)
command_limit = arg_to_number(demisto.args().get('limit', limit))
events, new_last_ids, new_collect_from = get_events(
aws_client=client,
collect_from={},
collect_from_default=collect_from,
last_ids={},
severity=severity,
limit=command_limit if command_limit else MAX_RESULTS)
command_results = CommandResults(
readable_output=tableToMarkdown('AWSGuardDuty Logs', events, headerTransform=pascalToSpace),
outputs_prefix='AWSGuardDuty.Logs',
outputs_key_field='event.id',
outputs=events,
raw_response=events,
)
return_results(command_results)
if command == 'fetch-events':
last_run = demisto.getLastRun()
collect_from_dict = last_run.get('collect_from', {})
last_ids = last_run.get('last_ids', {})
events, new_last_ids, new_collect_from_dict = get_events(aws_client=client,
collect_from=collect_from_dict,
collect_from_default=first_fetch,
last_ids=last_ids,
severity=aws_gd_severity,
limit=limit if limit else MAX_RESULTS)
demisto.setLastRun({
'collect_from': new_collect_from_dict,
'last_ids': new_last_ids
})
if argToBoolean(demisto.args().get('should_push_events', 'true')):
send_events_to_xsiam(events, params.get('vendor', 'AWS'), params.get('product', 'GuardDuty'))
elif command != 'test-module':
raise NotImplementedError(f"Command {command} is not implemented.")
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
31,712 |
def fetch_incidents(client: Client, first_fetch: str, dashboard_id: str = None, panel_id: str = None,
alert_name: str = None, state: str = None, max_fetch: int = MAX_INCIDENTS_TO_FETCH) -> List[dict]:
last_fetch = demisto.getLastRun().get('last_fetch', None)
fetch_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug(f'last fetch was at: {last_fetch}, time to fetch from is: {fetch_time}')
alerts = client.alerts_list_request(dashboard_id=argToList(dashboard_id), panel_id=panel_id, query=alert_name,
state=argToList(state))
last_fetch, incidents = parse_alerts(alerts, max_fetch, fetch_time)
demisto.debug(f'last fetch now is: {last_fetch}, number of incidents fetched is {len(incidents)}')
demisto.setLastRun({'last_fetch': str(date_to_timestamp(last_fetch, DATE_FORMAT))})
return incidents
|
def fetch_incidents(client: Client, first_fetch: str, dashboard_id: str = None, panel_id: str = None,
alert_name: str = None, state: str = None, max_fetch: int = MAX_INCIDENTS_TO_FETCH) -> List[dict]:
last_fetch = demisto.getLastRun().get('last_fetch', None)
fetch_start_time = calculate_fetch_start_time(last_fetch, first_fetch)
demisto.debug(f'last fetch was at: {last_fetch}, time to fetch from is: {fetch_start_time}')
alerts = client.alerts_list_request(dashboard_id=argToList(dashboard_id), panel_id=panel_id, query=alert_name,
state=argToList(state))
last_fetch, incidents = parse_alerts(alerts, max_fetch, fetch_start_time)
demisto.debug(f'last fetch now is: {last_fetch}, number of incidents fetched is {len(incidents)}')
demisto.setLastRun({'last_fetch': str(date_to_timestamp(last_fetch, DATE_FORMAT))})
return incidents
|
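The last_fetch value stored via setLastRun acts as a fetch checkpoint. A hedged, standalone sketch of that round trip follows, with a plain dict standing in for demisto's last-run storage and assuming date_to_timestamp yields epoch milliseconds (an assumption, not confirmed above):

from datetime import datetime, timezone

_last_run = {}   # stand-in for demisto.getLastRun() / demisto.setLastRun()


def fetch_start(first_fetch):
    """Return the datetime to fetch from: the stored checkpoint, else first_fetch."""
    stored = _last_run.get('last_fetch')
    if stored is None:
        return first_fetch
    return datetime.fromtimestamp(int(stored) / 1000, tz=timezone.utc)


def save_checkpoint(last_fetch):
    """Persist the newest alert time as an epoch-milliseconds string."""
    _last_run['last_fetch'] = str(int(last_fetch.timestamp() * 1000))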
29,307 |
def apply_change_list(collection_id, change_list):
"""Applies a changelist to a pristine collection and returns the result.
Args:
collection_id: str. ID of the given collection.
change_list: list(dict). A change list to be applied to the given
collection. Each entry is a dict that represents a CollectionChange
object.
Returns:
Collection. The resulting collection domain object.
Raises:
Exception. The change_list is not applicable to the given collection_id.
"""
collection = get_collection_by_id(collection_id)
try:
changes = [
collection_domain.CollectionChange(change_dict)
for change_dict in change_list
]
for change in changes:
if change.cmd == collection_domain.CMD_ADD_COLLECTION_NODE:
collection.add_node(change.exploration_id)
elif change.cmd == collection_domain.CMD_DELETE_COLLECTION_NODE:
collection.delete_node(change.exploration_id)
elif change.cmd == collection_domain.CMD_SWAP_COLLECTION_NODES:
collection.swap_nodes(change.first_index, change.second_index)
elif change.cmd == collection_domain.CMD_EDIT_COLLECTION_PROPERTY:
if (change.property_name ==
collection_domain.COLLECTION_PROPERTY_TITLE):
collection.update_title(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_CATEGORY):
collection.update_category(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_OBJECTIVE):
collection.update_objective(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_LANGUAGE_CODE):
collection.update_language_code(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_TAGS):
collection.update_tags(change.new_value)
elif (change.cmd ==
collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
# Loading the collection model from the datastore into a
# Collection domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# collection is sufficient to apply the schema migration.
continue
return collection
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, collection_id, change_list)
)
raise e
|
def apply_change_list(collection_id, change_list):
"""Applies a changelist to a pristine collection and returns the result.
Args:
collection_id: str. ID of the given collection.
change_list: list(dict). A change list to be applied to the given
collection. Each entry is a dict that represents a CollectionChange
object.
Returns:
Collection. The resulting collection domain object.
Raises:
Exception. The change list is not applicable to the given collection.
"""
collection = get_collection_by_id(collection_id)
try:
changes = [
collection_domain.CollectionChange(change_dict)
for change_dict in change_list
]
for change in changes:
if change.cmd == collection_domain.CMD_ADD_COLLECTION_NODE:
collection.add_node(change.exploration_id)
elif change.cmd == collection_domain.CMD_DELETE_COLLECTION_NODE:
collection.delete_node(change.exploration_id)
elif change.cmd == collection_domain.CMD_SWAP_COLLECTION_NODES:
collection.swap_nodes(change.first_index, change.second_index)
elif change.cmd == collection_domain.CMD_EDIT_COLLECTION_PROPERTY:
if (change.property_name ==
collection_domain.COLLECTION_PROPERTY_TITLE):
collection.update_title(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_CATEGORY):
collection.update_category(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_OBJECTIVE):
collection.update_objective(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_LANGUAGE_CODE):
collection.update_language_code(change.new_value)
elif (change.property_name ==
collection_domain.COLLECTION_PROPERTY_TAGS):
collection.update_tags(change.new_value)
elif (change.cmd ==
collection_domain.CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION):
# Loading the collection model from the datastore into a
# Collection domain object automatically converts it to use the
# latest schema version. As a result, simply resaving the
# collection is sufficient to apply the schema migration.
continue
return collection
except Exception as e:
logging.error(
'%s %s %s %s' % (
e.__class__.__name__, e, collection_id, change_list)
)
raise e
|
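For illustration, a change list is a list of dicts that CollectionChange can parse. The dict keys below ('cmd', 'exploration_id', 'property_name', 'new_value') are inferred from the attributes accessed above and should be treated as assumptions:

# Hypothetical change list exercising two of the commands handled above.
change_list = [
    {
        'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
        'exploration_id': 'exp_1',
    },
    {
        'cmd': collection_domain.CMD_EDIT_COLLECTION_PROPERTY,
        'property_name': collection_domain.COLLECTION_PROPERTY_TITLE,
        'new_value': 'A clearer title',
    },
]

updated_collection = apply_change_list('sample_collection_id', change_list)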
32,858 |
def traced__register_error_handler(wrapped, instance, args, kwargs):
"""Wrapper to trace all functions registered with flask.app.register_error_handler"""
def _wrap(key, code_or_exception, f):
return wrapped(key, code_or_exception, wrap_function(instance, f))
return _wrap(*args, **kwargs)
|
def traced__register_error_handler(wrapped, instance, args, kwargs):
"""Wrapper to trace all functions registered with flask.app._register_error_handler"""
def _wrap(key, code_or_exception, f):
return wrapped(key, code_or_exception, wrap_function(instance, f))
return _wrap(*args, **kwargs)
|
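The (wrapped, instance, args, kwargs) signature follows the wrapt wrapper convention. Assuming wrapt is the patching mechanism (an assumption, not stated above), the wrapper could be attached roughly like this; the attribute path is illustrative:

import wrapt

# Assumption: wrapt patches Flask; both the module and attribute names below
# are illustrative targets, not confirmed by the code above.
wrapt.wrap_function_wrapper(
    'flask',                           # module to patch
    'Flask._register_error_handler',   # hypothetical attribute to wrap
    traced__register_error_handler,    # wrapper defined above
)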
7,213 |
def rgba2gray(rgba, background=(1, 1, 1)):
"""Compute luminance of an RGBA image.
Parameters
----------
rgba : array_like
The image in RGBA format, in a 3-D array of shape ``(.., .., 4)``.
background : array_like
The color of the background to blend the image with. A tuple
containing 3 floats between 0 and 1 - the RGB value of the background.
Returns
-------
out : ndarray
The luminance image - an array which is the same size as the input
array, but with the channel dimension removed.
Raises
------
ValueError
If `rgba` is not a 3-D array of shape ``(.., .., 4)``.
Examples
--------
>>> from skimage.color import rgba2gray
>>> from skimage import data
>>> img = data.logo()
>>> img_gray = rgba2gray(img)
"""
return rgb2gray(rgba2rgb(rgba, background))
|
def rgba2gray(rgba, *, background=(1, 1, 1)):
"""Compute luminance of an RGBA image.
Parameters
----------
rgba : array_like
The image in RGBA format, in a 3-D array of shape ``(.., .., 4)``.
background : array_like
The color of the background to blend the image with. A tuple
containing 3 floats between 0 to 1 - the RGB value of the background.
Returns
-------
out : ndarray
The luminance image - an array which is the same size as the input
array, but with the channel dimension removed.
Raises
------
ValueError
If `rgba` is not a 3-D array of shape ``(.., .., 4)``.
Examples
--------
>>> from skimage.color import rgba2gray
>>> from skimage import data
>>> img = data.logo()
>>> img_gray = rgba2gray(img)
"""
return rgb2gray(rgba2rgb(rgba, background))
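A minimal usage sketch for the function above; the background is passed by keyword, which works with either signature, and only NumPy plus scikit-image are assumed:
import numpy as np
from skimage.color import rgba2gray

# A 1x2 RGBA image: an opaque red pixel and a fully transparent pixel.
rgba = np.array([[[1.0, 0.0, 0.0, 1.0],
                  [0.0, 0.0, 0.0, 0.0]]])
gray = rgba2gray(rgba, background=(1, 1, 1))
print(gray.shape)  # (1, 2): the channel axis is removed
print(gray[0, 1])  # ~1.0: the transparent pixel blends into the white background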
|
28,300 |
def add_parameter(method):
"""
A decorator function that wraps a method of an Instrument subclass such that the
new method will be converted into the corresponding :code:`param_class`
in the :code:`_add_params_from_decorated_methods`.
Args:
method: The method to be wrapped and flagged to be converted to parameter.
"""
if DECORATED_METHOD_PREFIX not in method.__name__:
raise ValueError(
f"Only methods prefixed with '{DECORATED_METHOD_PREFIX}' can be decorated "
f"with this decorator."
)
@wraps(method) # preserves info like `__doc__` and signature
def kwargs_and_doc_container(self, *args, **kwargs):
raise RuntimeError(
f"Method not intended to be called.\n"
f"'{method.__name__}' is a special method used as information container "
f"for creating and assigning parameters to {self}."
)
# special attribute to flag method for conversion to parameter
setattr(kwargs_and_doc_container, ADD_PARAMETER_ATTR_NAME, True)
return kwargs_and_doc_container
|
def add_parameter(method):
"""
A decorator function that wraps a method of an Instrument subclass such that the
new method will be converted into the corresponding :code:`param_class`
in the :code:`_add_params_from_decorated_methods`.
Args:
method: The method to be wrapped and flagged to be converted to parameter.
"""
if DECORATED_METHOD_PREFIX not in method.__name__:
raise ValueError(
f"Only methods prefixed with {DECORATED_METHOD_PREFIX!r} can be decorated "
f"with this decorator."
)
@wraps(method) # preserves info like `__doc__` and signature
def kwargs_and_doc_container(self, *args, **kwargs):
raise RuntimeError(
f"Method not intended to be called.\n"
f"'{method.__name__}' is a special method used as information container "
f"for creating and assigning parameters to {self}."
)
# special attribute to flag method for conversion to parameter
setattr(kwargs_and_doc_container, ADD_PARAMETER_ATTR_NAME, True)
return kwargs_and_doc_container
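A hedged sketch of how a decorated method might look; the two constants below are placeholder values for illustration only, not the framework's real definitions:
# Placeholder values standing in for the framework's own constants.
DECORATED_METHOD_PREFIX = "parameter_"
ADD_PARAMETER_ATTR_NAME = "_is_parameter_container"

class DummyInstrument:
    @add_parameter
    def parameter_voltage(self, label="Voltage", unit="V"):
        """Metadata container describing the voltage parameter."""

# The method is never called directly; it only carries the conversion flag.
assert getattr(DummyInstrument.parameter_voltage, ADD_PARAMETER_ATTR_NAME) is True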
|
41,607 |
def test_get_401_403_404_and_returns_none(http):
endpoints = {"/{}".format(code): code for code in {401, 403, 404}}
repo = MockHttpRepository(endpoints, http)
for endpoint in endpoints:
assert repo._get(endpoint) is None
|
def test_get_40x_and_returns_none(http):
endpoints = {"/{}".format(code): code for code in {401, 403, 404}}
repo = MockHttpRepository(endpoints, http)
for endpoint in endpoints:
assert repo._get(endpoint) is None
|
8,849 |
def search(*patterns):
"""Decorate a function to be called when a pattern is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@search('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more search rules::
@search('here')
@search('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here" (once per expression)
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a PRIVMSG, where a part
of a string matching this expression is said, the function will execute.
Note that captured groups here will be retrievable through the
:class:`~sopel.trigger.Trigger` object later. The match will also contains
the position of the first instance found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@search('$nickname')
# will trigger once when the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match for the first instance only, starting from
the left of the line, and the function will execute only once per
regular expression.
To match for each time the expression is found, use the :func:`find`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "search_rules"):
function.search_rules = []
for value in patterns:
if value not in function.search_rules:
function.search_rules.append(value)
return function
return add_attribute
|
def search(*patterns):
"""Decorate a function to be called when a pattern matches anywhere in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@search('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more search rules::
@search('here')
@search('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here" (once per expression)
# will trigger once on "I'm right here!"
If the Sopel instance is in a channel, or sent a PRIVMSG, where a part
of a string matching this expression is said, the function will execute.
Note that captured groups here will be retrievable through the
:class:`~sopel.trigger.Trigger` object later. The match will also contains
the position of the first instance found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@search('$nickname')
# will trigger once when the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
The regex rule will match for the first instance only, starting from
the left of the line, and the function will execute only once per
regular expression.
To match for each time the expression is found, use the :func:`find`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "search_rules"):
function.search_rules = []
for value in patterns:
if value not in function.search_rules:
function.search_rules.append(value)
return function
return add_attribute
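A minimal plugin sketch, assuming Sopel 7.1 or later where this decorator is importable from sopel.plugin:
from sopel import plugin

@plugin.search(r'\b\d{4}-\d{2}-\d{2}\b')
def note_date(bot, trigger):
    # trigger wraps the first match found anywhere in the line
    bot.say('First date mentioned: %s' % trigger.group(0))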
|
32,011 |
def get_defenders(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Retrieve a list of defenders and their information.
Implement the command 'prisma-cloud-compute-defenders-list'.
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-defenders-list command arguments.
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.get("limit", "20"), offset=args.get("offset", "0")
)
defenders = client.get_defenders(params=assign_params(**args))
if defenders:
for defender in defenders:
if "lastModified" in defender:
defender["lastModified"] = parse_date_string_format(date_string=defender.get("lastModified"))
table = tableToMarkdown(
name="Defenders Information",
t=[
{
"hostname": defender.get("hostname"),
"version": defender.get("version"),
"cluster": defender.get("cluster"),
"status": f"Connected since {defender.get('lastModified')}"
if defender.get("connected") else f"Disconnected since {defender.get('lastModified')}",
"listener": defender.get("features", {}).get("proxyListenerType")
} for defender in defenders
],
headers=["hostname", "version", "cluster", "status", "listener"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
else:
table = "No results found"
return CommandResults(
outputs_prefix="PrismaCloudCompute.DefenderDetails",
outputs_key_field="hostname",
outputs=defenders,
readable_output=table,
raw_response=defenders
)
|
def get_defenders(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Retrieve a list of defenders and their information.
Implement the command 'prisma-cloud-compute-defenders-list'.
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-defenders-list command arguments.
Returns:
CommandResults: command-results object.
"""
args["limit"], args["offset"] = parse_limit_and_offset_values(
limit=args.get("limit", "20"), offset=args.get("offset", "0")
)
defenders = client.get_defenders(params=assign_params(**args))
if defenders:
for defender in defenders:
if "lastModified" in defender:
defender["lastModified"] = parse_date_string_format(date_string=defender.get("lastModified"))
table = tableToMarkdown(
name="Defenders Information",
t=[
{
"hostname": defender.get("hostname"),
"version": defender.get("version"),
"cluster": defender.get("cluster"),
"status": f"Connected since {defender.get('lastModified')}"
if defender.get("connected") else f"Disconnected since {defender.get('lastModified')}",
"listener": defender.get("features", {}).get("proxyListenerType")
} for defender in defenders
],
headers=["hostname", "version", "cluster", "status", "listener"],
removeNull=True,
headerTransform=lambda word: word[0].upper() + word[1:]
)
else:
table = "No results found."
return CommandResults(
outputs_prefix="PrismaCloudCompute.DefenderDetails",
outputs_key_field="hostname",
outputs=defenders,
readable_output=table,
raw_response=defenders
)
|
32,091 |
def get_fetch_run_time_with_look_back(last_run, first_fetch, look_back=0, timezone=0, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Gets the time range for fetch
:type last_run: ``dict``
:param last_run: The LastRun object
:type first_fetch: ``str``
:param first_fetch: The first time to fetch, used in the first fetch
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type timezone: ``int``
:param timezone: The time zone offset in hours
:type date_format: ``str``
:param date_format: The date format
:return: The time range o fetch in
:rtype: ``Tuple``
"""
last_run_time = last_run and 'time' in last_run and last_run['time']
now = datetime.utcnow() + timedelta(hours=timezone)
if not last_run_time:
last_run_time, _ = parse_date_range(first_fetch)
else:
last_run_time = datetime.strptime(last_run_time, date_format)
if look_back > 0:
if now - last_run_time < timedelta(minutes=look_back):
last_run_time = now - timedelta(minutes=look_back)
return last_run_time.strftime(date_format), now.strftime(date_format)
|
def get_fetch_run_time_with_look_back(last_run, first_fetch, look_back=0, timezone=0, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Gets the time range for fetch
:type last_run: ``dict``
:param last_run: The LastRun object
:type first_fetch: ``str``
:param first_fetch: The first time to fetch, used in the first fetch
:type look_back: ``int``
:param look_back: The time to look back in fetch in minutes
:type timezone: ``int``
:param timezone: The time zone offset in hours
:type date_format: ``str``
:param date_format: The date format
:return: The time range of the creation date for the incidents to fetch in the current run.
:rtype: ``Tuple``
"""
last_run_time = last_run and 'time' in last_run and last_run['time']
now = datetime.utcnow() + timedelta(hours=timezone)
if not last_run_time:
last_run_time, _ = parse_date_range(first_fetch)
else:
last_run_time = datetime.strptime(last_run_time, date_format)
if look_back > 0:
if now - last_run_time < timedelta(minutes=look_back):
last_run_time = now - timedelta(minutes=look_back)
return last_run_time.strftime(date_format), now.strftime(date_format)
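A toy illustration of the look-back rule above, using only the standard library: when the previous run is more recent than look_back minutes, the window is widened so it always starts look_back minutes before now.
from datetime import datetime, timedelta

now = datetime(2023, 1, 1, 12, 0, 0)
last_run_time = datetime(2023, 1, 1, 11, 58, 0)  # previous run, 2 minutes ago
look_back = 15
if look_back > 0 and now - last_run_time < timedelta(minutes=look_back):
    last_run_time = now - timedelta(minutes=look_back)
print(last_run_time)  # 2023-01-01 11:45:00, i.e. the fetch window starts 15 minutes back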
|
8,465 |
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
Expand LTOC to include all the closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
(name, path, typ) = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
lTOC.append((lib, npth, 'BINARY'))
return lTOC
|
def Dependencies(lTOC, xtrapath=None, manifest=None, redirects=None):
"""
Expand LTOC to include all the closure of binary dependencies.
`LTOC` is a logical table of contents, ie, a seq of tuples (name, path).
Return LTOC expanded by all the binary dependencies of the entries
in LTOC, except those listed in the module global EXCLUDES
`manifest` may be a winmanifest.Manifest instance for a program manifest, so
that all dependent assemblies of python.exe can be added to the built exe.
`redirects` may be a list. Any assembly redirects found via policy files will
be added to the list as BindingRedirect objects so they can later be used
to modify any manifests that reference the redirected assembly.
"""
# Extract all necessary binary modules from Python eggs to be included
# directly with PyInstaller.
lTOC = _extract_from_egg(lTOC)
# 4 processes may yield up to +40% speed on 2 CPUs
# 2 processes may yield up to +30% speed on 2 CPUs
processes = 2 * os.cpu_count()
pool = multiprocessing.Pool(processes)
if is_win:
# Search for required assemblies and add them to the TOC
paths = [path for name, path, typ in lTOC]
assemblies = pool.map(
functools.partial(getAssemblyFiles, manifest=manifest, redirects=redirects),
paths
)
# getAssemblyFiles returns a list of tuples, so assemblies is a
# list of list of tuples
for assembly in assemblies:
for ftocnm, fn in assembly:
lTOC.append((ftocnm, fn, 'BINARY'))
dataset = collections.deque([(name, path, typ) for (name, path, typ) in lTOC])
while True:
# Breakdown the dataset in chunks as big as the chosen number of processes
# instead of just feeding the whole dataset into process pool
# so that we can keep the "seen" cache in main process only
chunk = []
while (len(chunk) < processes) and len(dataset):
(name, path, typ) = dataset.pop()
if name.upper() in seen:
continue
chunk.append(path)
if not chunk:
break # From while True, no more data
imports = pool.map(
functools.partial(selectImports, xtrapath=xtrapath),
chunk
)
# selectImports returns a list of pairs, so 'imports' is
# a list of lists of pairs
for item_dependencies in imports:
for (lib, npth) in item_dependencies:
if lib.upper() in seen or npth.upper() in seen:
continue
seen.add(npth.upper())
                lTOC.append((lib, npth, 'BINARY'))
return lTOC
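A small self-contained sketch of the chunked work-queue pattern used above, with the process pool and the real import scanning replaced by a plain loop and a fake dependency table:
import collections

seen = set()
dataset = collections.deque(["a", "b", "c"])
fake_deps = {"a": ["b", "d"], "b": [], "c": ["d"], "d": []}
results = []
chunk_size = 2
while dataset:
    # Pull a chunk of unseen items off the queue, mirroring the loop above.
    chunk = []
    while len(chunk) < chunk_size and dataset:
        item = dataset.pop()
        if item in seen:
            continue
        seen.add(item)
        chunk.append(item)
    if not chunk:
        break
    # "Scan" each item and enqueue its newly discovered dependencies.
    for item in chunk:
        for dep in fake_deps[item]:
            if dep not in seen:
                dataset.append(dep)
        results.append(item)
print(sorted(results))  # ['a', 'b', 'c', 'd']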
|
17,155 |
def _condition_based_services(
vehicle: MyBMWVehicle, unit_system: UnitSystem
) -> dict[str, Any]:
extra_attributes = {}
for report in vehicle.condition_based_services.messages:
if (
report.service_type not in ALLOWED_CONDITION_BASED_SERVICE_KEYS
and report.service_type not in LOGGED_CONDITION_BASED_SERVICE_WARINGS
):
_LOGGER.warning(
"'%s' not an allowed condition based service (%s)",
report.service_type,
report,
)
LOGGED_CONDITION_BASED_SERVICE_WARINGS.add(report.service_type)
continue
extra_attributes.update(_format_cbs_report(report, unit_system))
return extra_attributes
|
def _condition_based_services(
vehicle: MyBMWVehicle, unit_system: UnitSystem
) -> dict[str, Any]:
extra_attributes = {}
for report in vehicle.condition_based_services.messages:
if (
report.service_type not in ALLOWED_CONDITION_BASED_SERVICE_KEYS
            and report.service_type not in LOGGED_CONDITION_BASED_SERVICE_WARNINGS
):
_LOGGER.warning(
"'%s' not an allowed condition based service (%s)",
report.service_type,
report,
)
LOGGED_CONDITION_BASED_SERVICE_WARNINGS.add(report.service_type)
continue
extra_attributes.update(_format_cbs_report(report, unit_system))
return extra_attributes
|
22,093 |
def _is_ntlm_message(message):
"""
Checks whether the given string is an NTLM message
"""
if message[:4].lower() == 'ntlm' and message.split(' ', 1)[1:]:
return True
if message[:9].lower() == 'negotiate':
message = message.split(' ', 1)
if message[1:]:
return utils.decode_b64(message[1].encode())[:7] == b'NTLMSSP'
return False
|
def _is_ntlm_message(message):
"""
Checks whether the given string is an NTLM message
"""
if message[:4].lower() == 'ntlm' and message[4:].strip():
return True
if message[:9].lower() == 'negotiate':
message = message.split(' ', 1)
if message[1:]:
return utils.decode_b64(message[1].encode())[:7] == b'NTLMSSP'
return False
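A small illustration, assuming the surrounding module's utils.decode_b64 performs a plain base64 decode:
import base64

# Illustrative token whose payload starts with the NTLMSSP signature.
token = base64.b64encode(b'NTLMSSP\x00\x01\x00\x00\x00').decode()
print(_is_ntlm_message('NTLM ' + token))        # True
print(_is_ntlm_message('Negotiate ' + token))   # True, payload decodes to b'NTLMSSP...'
print(_is_ntlm_message('Basic realm="test"'))   # False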
|
12,275 |
def read_qasm(qasm_input, mode="qiskit", version="2.0", strmode=False):
'''
Read OpenQASM intermediate representation
(https://github.com/Qiskit/openqasm) and return
a QubitCircuit and state inputs as specified in the
QASM file.
Parameters
----------
qasm_input : str
File location or String Input for QASM file to be imported. In case of
string input, the parameter strmode must be True.
mode : str
QASM mode to be read in. When mode is "qiskit",
the "qelib1.inc" include is automatically included,
without checking externally. Otherwise, each include is
processed.
version : str
QASM version of the QASM file. Only version 2.0 is currently supported.
strmode : bool
if specified as True, indicates that qasm_input is in string format
rather than from file.
Returns
-------
qc : QubitCircuit
Returns QubitCircuit specified in the QASM file.
'''
if strmode:
qasm_lines = qasm_input.splitlines()
else:
f = open(qasm_input, "r")
qasm_lines = f.read().splitlines()
f.close()
# split input into lines and ignore comments
qasm_lines = [line.strip() for line in qasm_lines]
qasm_lines = list(filter(lambda x: x[:2] != "//" and x != "", qasm_lines))
if version != "2.0":
raise NotImplementedError("QASM: Only OpenQASM 2.0 \
is currently supported.")
if qasm_lines.pop(0) != "OPENQASM 2.0;":
raise SyntaxError("QASM: File does not contain QASM 2.0 header")
qasm_obj = QasmProcessor(qasm_lines, mode=mode, version=version)
qasm_obj.commands = _tokenize(qasm_obj.commands)
qasm_obj._process_includes()
qasm_obj._initialize_pass()
qc = QubitCircuit(qasm_obj.num_qubits, num_cbits=qasm_obj.num_cbits)
qasm_obj._final_pass(qc)
return qc
|
def read_qasm(qasm_input, mode="qiskit", version="2.0", strmode=False):
'''
Read OpenQASM intermediate representation
(https://github.com/Qiskit/openqasm) and return
a QubitCircuit and state inputs as specified in the
QASM file.
Parameters
----------
qasm_input : str
File location or String Input for QASM file to be imported. In case of
string input, the parameter strmode must be True.
mode : str
QASM mode to be read in. When mode is "qiskit",
the "qelib1.inc" include is automatically included,
without checking externally. Otherwise, each include is
processed.
version : str
QASM version of the QASM file. Only version 2.0 is currently supported.
strmode : bool
if specified as True, indicates that qasm_input is in string format
rather than from file.
Returns
-------
qc : QubitCircuit
Returns a :class:`.QubitCircuit` object specified in the QASM file.
'''
if strmode:
qasm_lines = qasm_input.splitlines()
else:
f = open(qasm_input, "r")
qasm_lines = f.read().splitlines()
f.close()
# split input into lines and ignore comments
qasm_lines = [line.strip() for line in qasm_lines]
qasm_lines = list(filter(lambda x: x[:2] != "//" and x != "", qasm_lines))
if version != "2.0":
raise NotImplementedError("QASM: Only OpenQASM 2.0 \
is currently supported.")
if qasm_lines.pop(0) != "OPENQASM 2.0;":
raise SyntaxError("QASM: File does not contain QASM 2.0 header")
qasm_obj = QasmProcessor(qasm_lines, mode=mode, version=version)
qasm_obj.commands = _tokenize(qasm_obj.commands)
qasm_obj._process_includes()
qasm_obj._initialize_pass()
qc = QubitCircuit(qasm_obj.num_qubits, num_cbits=qasm_obj.num_cbits)
qasm_obj._final_pass(qc)
return qc
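A minimal usage sketch for the reader above, parsing a two-qubit Bell circuit from a string:
qasm_str = '''OPENQASM 2.0;
include "qelib1.inc";
qreg q[2];
h q[0];
cx q[0],q[1];
'''
qc = read_qasm(qasm_str, mode="qiskit", strmode=True)
print(qc.gates)  # the parsed gate sequence of the returned QubitCircuit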
|
5,917 |
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(
Path('scratch') / 'target' / 'simple'
)
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(
Path('scratch') / 'target' / 'simple'
)
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(
Path('scratch') / 'target' / 'simple'
)
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
|
def test_install_package_with_target(script):
"""
Test installing a package using pip install --target
"""
target_dir = script.scratch_path / 'target'
result = script.pip_install_local('-t', target_dir, "simple==1.0")
result.did_create(Path('scratch') / 'target' / 'simple')
# Test repeated call without --upgrade, no files should have changed
result = script.pip_install_local(
'-t', target_dir, "simple==1.0", expect_stderr=True,
)
result.did_not_update(
Path('scratch') / 'target' / 'simple'
)
# Test upgrade call, check that new version is installed
result = script.pip_install_local('--upgrade', '-t',
target_dir, "simple==2.0")
result.did_update(
Path('scratch') / 'target' / 'simple'
)
egg_folder = (
Path('scratch') / 'target' /
'simple-2.0-py{pyversion}.egg-info'.format(**globals()))
result.did_create(egg_folder)
# Test install and upgrade of single-module package
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.0')
singlemodule_py = Path('scratch') / 'target' / 'singlemodule.py'
result.did_create(singlemodule_py)
result = script.pip_install_local('-t', target_dir, 'singlemodule==0.0.1',
'--upgrade')
result.did_update(singlemodule_py)
|
36,077 |
def export_zip(
entities: Optional[Iterable[Any]] = None,
filename: Optional[str] = None,
use_compression: bool = True,
**traversal_rules: bool,
) -> Tuple[float, ...]:
"""Export in a zipped folder
.. deprecated:: 1.2.1
Support for the parameters `what` and `outfile` will be removed in `v2.0.0`.
Please use `entities` and `filename` instead, respectively.
:param entities: a list of entity instances; they can belong to different models/entities.
:param filename: the filename
(possibly including the absolute path) of the file on which to export.
:param use_compression: Whether or not to compress the zip file.
"""
# Backwards-compatibility
entities = cast(
Iterable[Any],
deprecated_parameters(
old={
'name': 'what',
'value': traversal_rules.pop('what', None)
},
new={
'name': 'entities',
'value': entities
},
),
)
filename = cast(
str,
deprecated_parameters(
old={
'name': 'outfile',
'value': traversal_rules.pop('outfile', None)
},
new={
'name': 'filename',
'value': filename
},
),
)
type_check(
entities,
(list, tuple, set),
msg='`entities` must be specified and given as a list of AiiDA entities',
)
entities = list(entities)
if type_check(filename, str, allow_none=True) is None:
filename = 'export_data.aiida'
with ZipFolder(filename, mode='w', use_compression=use_compression) as folder:
time_start = time.time()
export_tree(entities=entities, folder=folder, **traversal_rules)
time_end = time.time()
return (time_start, time_end)
|
def export_zip(
entities: Optional[Iterable[Any]] = None,
filename: Optional[str] = None,
use_compression: bool = True,
**traversal_rules: bool,
) -> Tuple[float, float]:
"""Export in a zipped folder
.. deprecated:: 1.2.1
Support for the parameters `what` and `outfile` will be removed in `v2.0.0`.
Please use `entities` and `filename` instead, respectively.
:param entities: a list of entity instances; they can belong to different models/entities.
:param filename: the filename
(possibly including the absolute path) of the file on which to export.
:param use_compression: Whether or not to compress the zip file.
"""
# Backwards-compatibility
entities = cast(
Iterable[Any],
deprecated_parameters(
old={
'name': 'what',
'value': traversal_rules.pop('what', None)
},
new={
'name': 'entities',
'value': entities
},
),
)
filename = cast(
str,
deprecated_parameters(
old={
'name': 'outfile',
'value': traversal_rules.pop('outfile', None)
},
new={
'name': 'filename',
'value': filename
},
),
)
type_check(
entities,
(list, tuple, set),
msg='`entities` must be specified and given as a list of AiiDA entities',
)
entities = list(entities)
if type_check(filename, str, allow_none=True) is None:
filename = 'export_data.aiida'
with ZipFolder(filename, mode='w', use_compression=use_compression) as folder:
time_start = time.time()
export_tree(entities=entities, folder=folder, **traversal_rules)
time_end = time.time()
return (time_start, time_end)
|
36,815 |
def object_similarity(obj1, obj2, prop_scores={}, **weight_dict):
"""This method returns a measure of similarity depending on how
similar the two objects are.
Args:
obj1: A stix2 object instance
obj2: A stix2 object instance
prop_scores: A dictionary that can hold individual property scores,
weights, contributing score, matching score and sum of weights.
weight_dict: A dictionary that can be used to override settings
in the semantic equivalence process
Returns:
float: A number between 0.0 and 100.0 as a measurement of similarity.
Warning:
Object types need to have property weights defined for the similarity process.
Otherwise, those objects will not influence the final score. The WEIGHTS
dictionary under `stix2.equivalence.object` can give you an idea on how to add
new entries and pass them via the `weight_dict` argument. Similarly, the values
or methods can be fine tuned for a particular use case.
Note:
Default weight_dict:
.. include:: ../../object_default_sem_eq_weights.rst
Note:
This implementation follows the Semantic Equivalence Committee Note.
see `the Committee Note <link here>`__.
"""
weights = WEIGHTS.copy()
if weight_dict:
weights.update(weight_dict)
type1, type2 = obj1["type"], obj2["type"]
ignore_spec_version = weights["_internal"]["ignore_spec_version"]
if type1 != type2:
raise ValueError('The objects to compare must be of the same type!')
if ignore_spec_version is False and obj1.get("spec_version", "2.0") != obj2.get("spec_version", "2.0"):
raise ValueError('The objects to compare must be of the same spec version!')
try:
weights[type1]
except KeyError:
logger.warning("'%s' type has no 'weights' dict specified & thus no semantic equivalence method to call!", type1)
sum_weights = matching_score = 0
else:
try:
method = weights[type1]["method"]
except KeyError:
logger.debug("Starting semantic equivalence process between: '%s' and '%s'", obj1["id"], obj2["id"])
matching_score = 0.0
sum_weights = 0.0
for prop in weights[type1]:
if check_property_present(prop, obj1, obj2):
w = weights[type1][prop][0]
comp_funct = weights[type1][prop][1]
if comp_funct == partial_timestamp_based:
contributing_score = w * comp_funct(obj1[prop], obj2[prop], weights[type1]["tdelta"])
elif comp_funct == partial_location_distance:
threshold = weights[type1]["threshold"]
contributing_score = w * comp_funct(obj1["latitude"], obj1["longitude"], obj2["latitude"], obj2["longitude"], threshold)
elif comp_funct == reference_check or comp_funct == list_reference_check:
max_depth = weights["_internal"]["max_depth"]
if max_depth > 0:
weights["_internal"]["max_depth"] = max_depth - 1
ds1, ds2 = weights["_internal"]["ds1"], weights["_internal"]["ds2"]
contributing_score = w * comp_funct(obj1[prop], obj2[prop], ds1, ds2, **weights)
weights["_internal"]["max_depth"] = max_depth + 1
else:
continue # prevent excessive recursion
else:
contributing_score = w * comp_funct(obj1[prop], obj2[prop])
sum_weights += w
matching_score += contributing_score
prop_scores[prop] = {
"weight": w,
"contributing_score": contributing_score,
}
logger.debug("'%s' check -- weight: %s, contributing score: %s", prop, w, contributing_score)
prop_scores["matching_score"] = matching_score
prop_scores["sum_weights"] = sum_weights
logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights)
else:
logger.debug("Starting semantic equivalence process between: '%s' and '%s'", obj1["id"], obj2["id"])
try:
matching_score, sum_weights = method(obj1, obj2, prop_scores, **weights[type1])
except TypeError:
# method doesn't support detailed output with prop_scores
matching_score, sum_weights = method(obj1, obj2, **weights[type1])
logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights)
if sum_weights <= 0:
return 0
equivalence_score = (matching_score / sum_weights) * 100.0
return equivalence_score
|
def object_similarity(obj1, obj2, prop_scores={}, **weight_dict):
"""This method returns a measure of similarity depending on how
similar the two objects are.
Args:
obj1: A stix2 object instance
obj2: A stix2 object instance
prop_scores: A dictionary that can hold individual property scores,
weights, contributing score, matching score and sum of weights.
weight_dict: A dictionary that can be used to override settings
in the similarity process
Returns:
float: A number between 0.0 and 100.0 as a measurement of similarity.
Warning:
Object types need to have property weights defined for the similarity process.
Otherwise, those objects will not influence the final score. The WEIGHTS
dictionary under `stix2.equivalence.object` can give you an idea on how to add
new entries and pass them via the `weight_dict` argument. Similarly, the values
or methods can be fine tuned for a particular use case.
Note:
Default weight_dict:
.. include:: ../../object_default_sem_eq_weights.rst
Note:
This implementation follows the Semantic Equivalence Committee Note.
see `the Committee Note <link here>`__.
"""
weights = WEIGHTS.copy()
if weight_dict:
weights.update(weight_dict)
type1, type2 = obj1["type"], obj2["type"]
ignore_spec_version = weights["_internal"]["ignore_spec_version"]
if type1 != type2:
raise ValueError('The objects to compare must be of the same type!')
if ignore_spec_version is False and obj1.get("spec_version", "2.0") != obj2.get("spec_version", "2.0"):
raise ValueError('The objects to compare must be of the same spec version!')
try:
weights[type1]
except KeyError:
logger.warning("'%s' type has no 'weights' dict specified & thus no semantic equivalence method to call!", type1)
sum_weights = matching_score = 0
else:
try:
method = weights[type1]["method"]
except KeyError:
logger.debug("Starting semantic equivalence process between: '%s' and '%s'", obj1["id"], obj2["id"])
matching_score = 0.0
sum_weights = 0.0
for prop in weights[type1]:
if check_property_present(prop, obj1, obj2):
w = weights[type1][prop][0]
comp_funct = weights[type1][prop][1]
if comp_funct == partial_timestamp_based:
contributing_score = w * comp_funct(obj1[prop], obj2[prop], weights[type1]["tdelta"])
elif comp_funct == partial_location_distance:
threshold = weights[type1]["threshold"]
contributing_score = w * comp_funct(obj1["latitude"], obj1["longitude"], obj2["latitude"], obj2["longitude"], threshold)
elif comp_funct == reference_check or comp_funct == list_reference_check:
max_depth = weights["_internal"]["max_depth"]
if max_depth > 0:
weights["_internal"]["max_depth"] = max_depth - 1
ds1, ds2 = weights["_internal"]["ds1"], weights["_internal"]["ds2"]
contributing_score = w * comp_funct(obj1[prop], obj2[prop], ds1, ds2, **weights)
weights["_internal"]["max_depth"] = max_depth + 1
else:
continue # prevent excessive recursion
else:
contributing_score = w * comp_funct(obj1[prop], obj2[prop])
sum_weights += w
matching_score += contributing_score
prop_scores[prop] = {
"weight": w,
"contributing_score": contributing_score,
}
logger.debug("'%s' check -- weight: %s, contributing score: %s", prop, w, contributing_score)
prop_scores["matching_score"] = matching_score
prop_scores["sum_weights"] = sum_weights
logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights)
else:
logger.debug("Starting semantic equivalence process between: '%s' and '%s'", obj1["id"], obj2["id"])
try:
matching_score, sum_weights = method(obj1, obj2, prop_scores, **weights[type1])
except TypeError:
# method doesn't support detailed output with prop_scores
matching_score, sum_weights = method(obj1, obj2, **weights[type1])
logger.debug("Matching Score: %s, Sum of Weights: %s", matching_score, sum_weights)
if sum_weights <= 0:
return 0
equivalence_score = (matching_score / sum_weights) * 100.0
return equivalence_score
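A hedged, self-contained sketch of calling the function above with plain dicts and a caller-supplied weight table; the "campaign" weights and the "_internal" settings below are assumptions required by the code shown, not values taken from the library defaults:
def exact_match(a, b):
    # toy comparison function: full credit only for identical values
    return 1.0 if a == b else 0.0

obj1 = {"type": "campaign", "id": "campaign--1", "name": "Operation Alpha"}
obj2 = {"type": "campaign", "id": "campaign--2", "name": "Operation Alpha"}
prop_scores = {}
score = object_similarity(
    obj1, obj2, prop_scores,
    campaign={"name": (100, exact_match)},
    _internal={"ignore_spec_version": True, "max_depth": 1, "ds1": None, "ds2": None},
)
print(score)                # 100.0
print(prop_scores["name"])  # {'weight': 100, 'contributing_score': 100.0}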
|
58,673 |
def get_persistor(name: Text) -> Optional["Persistor"]:
"""Returns an instance of the requested persistor.
Currently, `aws`, `gcs`, `azure` and 'aliyun oss' are supported"""
if name == "aws":
return AWSPersistor(
os.environ.get("BUCKET_NAME"), os.environ.get("AWS_ENDPOINT_URL")
)
if name == "gcs":
return GCSPersistor(os.environ.get("BUCKET_NAME"))
if name == "azure":
return AzurePersistor(
os.environ.get("AZURE_CONTAINER"),
os.environ.get("AZURE_ACCOUNT_NAME"),
os.environ.get("AZURE_ACCOUNT_KEY"),
)
if name == "ali-oss":
from ali_persistor import OssPersistor
return OssPersistor(
access_key_id=os.environ.get("ALI_ACCESS_KEY_ID"),
access_key_secret=os.environ.get("ALI_ACCESS_KEY_SECRET"),
sts_role_arn=os.environ.get("ALI_STS_ROLE_ARN"),
region_id=os.environ.get("ALI_REGION_ID"),
endpoint=os.environ.get("ALI_ENDPOINT"),
bucket_name=os.environ.get("ALI_BUCKET_NAME"),
bucket_path=os.environ.get("ALI_BUCKET_PATH"),
local_path=os.environ.get("ALI_LOCAL_PATH"),
)
return None
|
def get_persistor(name: Text) -> Optional["Persistor"]:
"""Returns an instance of the requested persistor.
Currently, `aws`, `gcs`, `azure` and 'aliyun oss' are supported"""
if name == "aws":
return AWSPersistor(
os.environ.get("BUCKET_NAME"), os.environ.get("AWS_ENDPOINT_URL")
)
if name == "gcs":
return GCSPersistor(os.environ.get("BUCKET_NAME"))
if name == "azure":
return AzurePersistor(
os.environ.get("AZURE_CONTAINER"),
os.environ.get("AZURE_ACCOUNT_NAME"),
os.environ.get("AZURE_ACCOUNT_KEY"),
)
try:
persistor = rasa.utils.common.class_from_module_path(name)
return persistor()
except ImportError:
raise ImportError(f"Unknown model persistor {name}. Please make sure to "
f"either use an included model persistor (`aws`, `gcs` "
f"or `azure`) or specify the module path to an external "
f"model persistor.")
return None
|
22,818 |
def load_http_proxy_from_env(logger: logging.Logger = _default_logger) -> Optional[str]:
proxy_url = (
os.environ.get("HTTPS_PROXY")
or os.environ.get("https_proxy")
or os.environ.get("HTTP_PROXY")
or os.environ.get("http_proxy")
)
if proxy_url is None or len(proxy_url.strip()) == 0:
# If the value is an empty string, the intention should be unsetting it
        if proxy_url is not None and len(proxy_url.strip()) == 0:
logger.debug("HTTP proxy env variable is set but empty")
return None
logger.debug(f"HTTP proxy URL has been loaded from an env variable: {proxy_url}")
return proxy_url
|
def load_http_proxy_from_env(logger: logging.Logger = _default_logger) -> Optional[str]:
proxy_url = (
os.environ.get("HTTPS_PROXY")
or os.environ.get("https_proxy")
or os.environ.get("HTTP_PROXY")
or os.environ.get("http_proxy")
)
if proxy_url is None or len(proxy_url.strip()) == 0:
# If the value is an empty string, the intention should be unsetting it
        if proxy_url is not None and len(proxy_url.strip()) == 0:
logger.debug("The Slack SDK ignored the proxy env variable as an empty value is set.")
return None
logger.debug(f"HTTP proxy URL has been loaded from an env variable: {proxy_url}")
return proxy_url
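A minimal usage sketch for the resolver above; the proxy variables are cleared first so the outcome does not depend on the ambient environment:
import logging
import os

logger = logging.getLogger(__name__)
for name in ("HTTPS_PROXY", "https_proxy", "HTTP_PROXY", "http_proxy"):
    os.environ.pop(name, None)

os.environ["HTTPS_PROXY"] = "http://localhost:9000"
print(load_http_proxy_from_env(logger))  # http://localhost:9000

os.environ["HTTPS_PROXY"] = ""
os.environ["http_proxy"] = ""
print(load_http_proxy_from_env(logger))  # None, an empty value counts as explicitly unset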
|
54,375 |
def test_lock_file_resolves_file_url_symlinks(root: ProjectPackage):
"""
Create directories and file structure as follows:
d1/
d1/testsymlink -> d1/d2/d3
d1/d2/d3/lock_file
d1/d4/source_file
Using the testsymlink as the Locker.lock file path should correctly resolve to
the real physical path of the source_file when calculating the relative path
from the lock_file, i.e. "../../d4/source_file" instead of the unresolved path
from the symlink itself which would have been "../d4/source_file"
See https://github.com/python-poetry/poetry/issues/5849
"""
with tempfile.TemporaryDirectory() as d1:
symlink_path = Path(d1).joinpath("testsymlink")
with tempfile.TemporaryDirectory(dir=d1) as d2, tempfile.TemporaryDirectory(
dir=d1
) as d4, tempfile.TemporaryDirectory(dir=d2) as d3, tempfile.NamedTemporaryFile(
dir=d4
) as source_file, tempfile.NamedTemporaryFile(
dir=d3
) as lock_file:
lock_file.close()
os.symlink(Path(d3), symlink_path)
locker = Locker(str(symlink_path) + os.sep + Path(lock_file.name).name, {})
package_local = Package(
"local-package",
"1.2.3",
source_type="file",
source_url=source_file.name,
source_reference="develop",
source_resolved_reference="123456",
)
packages = [
package_local,
]
locker.set_lock_data(root, packages)
with locker.lock.open(encoding="utf-8") as f:
content = f.read()
expected = f"""\
[[package]]
name = "local-package"
version = "1.2.3"
description = ""
category = "main"
optional = false
python-versions = "*"
[package.source]
type = "file"
url = "{Path(os.path.relpath(Path(source_file.name).resolve().as_posix(),
Path(Path(lock_file.name).parent).resolve().as_posix())).as_posix()}"
reference = "develop"
resolved_reference = "123456"
[metadata]
lock-version = "1.1"
python-versions = "*"
content-hash = "115cf985d932e9bf5f540555bbdd75decbb62cac81e399375fc19f6277f8c1d8"
[metadata.files]
local-package = []
"""
assert content == expected
|
def test_lock_file_resolves_file_url_symlinks(root: ProjectPackage):
"""
Create directories and file structure as follows:
d1/
d1/testsymlink -> d1/d2/d3
d1/d2/d3/lock_file
d1/d4/source_file
Using the testsymlink as the Locker.lock file path should correctly resolve to
the real physical path of the source_file when calculating the relative path
from the lock_file, i.e. "../../d4/source_file" instead of the unresolved path
from the symlink itself which would have been "../d4/source_file"
See https://github.com/python-poetry/poetry/issues/5849
"""
with tempfile.TemporaryDirectory() as d1:
symlink_path = Path(d1).joinpath("testsymlink")
with tempfile.TemporaryDirectory(dir=d1) as d2, tempfile.TemporaryDirectory(
dir=d1
) as d4, tempfile.TemporaryDirectory(dir=d2) as d3, tempfile.NamedTemporaryFile(
dir=d4
) as source_file, tempfile.NamedTemporaryFile(
dir=d3
) as lock_file:
lock_file.close()
            try:
                os.symlink(Path(d3), symlink_path)
            except OSError:
                if sys.platform == "win32":
                    # os.symlink requires either administrative privileges or developer
                    # mode on Win10, throwing an OSError if neither is active.
                    # Test is not possible in that case.
                    return
                raise
locker = Locker(str(symlink_path) + os.sep + Path(lock_file.name).name, {})
package_local = Package(
"local-package",
"1.2.3",
source_type="file",
source_url=source_file.name,
source_reference="develop",
source_resolved_reference="123456",
)
packages = [
package_local,
]
locker.set_lock_data(root, packages)
with locker.lock.open(encoding="utf-8") as f:
content = f.read()
expected = f"""\
[[package]]
name = "local-package"
version = "1.2.3"
description = ""
category = "main"
optional = false
python-versions = "*"
[package.source]
type = "file"
url = "{Path(os.path.relpath(Path(source_file.name).resolve().as_posix(),
Path(Path(lock_file.name).parent).resolve().as_posix())).as_posix()}"
reference = "develop"
resolved_reference = "123456"
[metadata]
lock-version = "1.1"
python-versions = "*"
content-hash = "115cf985d932e9bf5f540555bbdd75decbb62cac81e399375fc19f6277f8c1d8"
[metadata.files]
local-package = []
"""
assert content == expected
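A small POSIX-oriented illustration of the resolution issue the docstring describes (os.symlink may require elevated privileges on Windows): the relative path computed from the symlink differs from the one computed from its resolved, physical directory.
import os
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as d1:
    real_dir = Path(d1, "d2", "d3")
    real_dir.mkdir(parents=True)
    Path(d1, "d4").mkdir()
    link = Path(d1, "testsymlink")
    os.symlink(real_dir, link)
    target = Path(d1, "d4", "source_file")
    print(os.path.relpath(target, link))            # ../d4/source_file
    print(os.path.relpath(target, link.resolve()))  # ../../d4/source_file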
|
30,043 |
def test_miner_setEtherbase(web3_empty):
web3 = web3_empty
assert web3.eth.coinbase == web3.eth.accounts[0]
new_account = web3.personal.newAccount('this-is-a-password')
with pytest.warns(DeprecationWarning):
web3.geth.miner.setEtherBase(new_account)
assert web3.eth.coinbase == new_account
|
def test_miner_setEtherbase(web3_empty):
web3 = web3_empty
assert web3.eth.coinbase == web3.eth.accounts[0]
new_account = web3.personal.newAccount('this-is-a-password')
with pytest.warns(DeprecationWarning):
web3.geth.miner.setEtherbase(new_account)
assert web3.eth.coinbase == new_account
|
39,098 |
def import_dynamic(package, name, prefix="class"):
"""Import class or method dynamically from package and name.
Args:
package: Where the method or class is located in the import path.
name: Name of method or class.
Returns:
obj: Imported class or method object.
"""
try:
return getattr(importlib.import_module(package), name)
except Exception: # pylint: disable=W0703
LOGGER.error(
'%s "%s.%s" not found, check the package or class name are valid.',
prefix.capitalize(),
package,
name)
sys.exit(1)
|
def import_dynamic(package, name, prefix="class"):
"""Import class or method dynamically from package and name.
Args:
package: Where the method or class is located in the import path.
name: Name of method or class.
Returns:
obj: Imported class or method object.
"""
try:
return getattr(importlib.import_module(package), name)
except Exception: # pylint: disable=W0703
LOGGER.error(
'%s "%s.%s" not found, check the package and class name are valid.',
prefix.capitalize(),
package,
name)
sys.exit(1)
|
4,302 |
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data.
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
57,844 |
def format_incidents(events: list, event_offset: int, last_fetch: int, module_name: str,
timestamp_endpoint: str = None) -> Tuple[List[Any], int, int, int]:
"""
This function loops over the alerts list and create incidents from different events.
For `Endpoint Security` and `Kill Chain` modules, if current event name is identical to previous
event name, then both are part of the same incident and we will only update the existing
incident description and won't create a new incident.
Args:
events (list): Events list to create incidents from.
event_offset (int): Event offset.
last_fetch (int): Timestamp in milliseconds on when to start fetching incidents.
module_name (str): Module name.
timestamp_endpoint(str): What API endpoint represent the event timestamp. If None is given,
timestamp endpoint from API can be 'Timestamp' or 'Attack_Timestamp'
Returns:
list: incidents,
int: event_offset,
str: alert_created_time,
int: len(events)
"""
incidents: List[Any] = []
event_counter = 0
offset = event_offset
alert_created_time = last_fetch
max_alert_created_time = alert_created_time
for event in events[offset:]:
# if current event name is identical to previous, then only update incident description.
if (module_name in ('endpoint-security', 'kill-chain')) and \
(incidents and extract_event_name(event, module_name) == incidents[-1].get('name')):
event_offset += 1
last_incident = incidents[-1]
alert_created_time = last_incident.get('occurred')
if module_name == 'endpoint-security':
alert_created_time = int(date_to_timestamp(alert_created_time,
date_format=DATE_FORMAT))
elif module_name == 'kill-chain':
alert_created_time = int(date_to_timestamp(alert_created_time,
date_format=CY_GENERAL_DATE_FORMAT))
step_num = event.get('Scenario_Counter') if module_name == 'endpoint-security' \
else event.get('Stage_Phase')
data = json.loads(last_incident.get('rawJSON'))
current_description = data.get('description')
new_description = f"\nStep {step_num}:\n{extract_event_description(event)}"
data['description'] = f'{current_description}{new_description}' if current_description \
else new_description
# Insert new description to the previous incident.
last_incident['rawJSON'] = json.dumps(data)
incidents[-1] = last_incident
# Keep track on the latest incident timestamp. Events that are part of an incident are
# returned without timestamp so we only use the first the event's timestamp (First step)
if alert_created_time > max_alert_created_time:
max_alert_created_time = alert_created_time
# The current event is new (has new name), then we need to build a new incident.
else:
if event_counter >= min(MAX_INCIDENTS_TO_FETCH,
int(demisto.params().get('max_fetch', MAX_INCIDENTS_TO_FETCH))):
break
# Incrementing the event offset, regardless of whether new incident will be created.
event_offset += 1
# If attack status is identical to previous assessment status, or the current attack was
# unsuccessful, we won't create incident.
if not event_status_changed(event):
continue
if timestamp_endpoint is None:
t_stamp = event.get('Timestamp') if event.get('Timestamp') else \
event.get('Attack_Timestamp')
else:
t_stamp = event.get(timestamp_endpoint)
# Validate API timestamp.
if validate_timestamp(t_stamp):
try:
alert_created_time = int(date_to_timestamp(t_stamp,
date_format=CY_GENERAL_DATE_FORMAT))
except Exception:
alert_created_time = int(date_to_timestamp(t_stamp,
date_format=CY_UNIQUE_DATE_FORMAT))
                # If current alert was created since last fetch time, create XSOAR incident.
if alert_created_time >= last_fetch:
incidents.append(build_incident_dict(event, module_name, t_stamp))
event_counter += 1
# Keep track on the latest incident timestamp.
if alert_created_time > max_alert_created_time:
max_alert_created_time = alert_created_time
return incidents, event_offset, max_alert_created_time, len(events)
|
def format_incidents(events: list, event_offset: int, last_fetch: int, module_name: str,
timestamp_endpoint: str = None) -> Tuple[List[Any], int, int, int]:
"""
This function loops over the alerts list and create incidents from different events.
For `Endpoint Security` and `Kill Chain` modules, if current event name is identical to previous
event name, then both are part of the same incident and we will only update the existing
incident description and won't create a new incident.
Args:
events (list): Events list to create incidents from.
event_offset (int): Event offset.
last_fetch (int): Timestamp in milliseconds on when to start fetching incidents.
module_name (str): Module name.
timestamp_endpoint(str): What API endpoint represent the event timestamp. If None is given,
timestamp endpoint from API can be 'Timestamp' or 'Attack_Timestamp'
Returns:
list: incidents,
int: event_offset,
str: alert_created_time,
int: len(events)
"""
incidents: List[Any] = []
event_counter = 0
offset = event_offset
alert_created_time = last_fetch
max_alert_created_time = alert_created_time
for event in events[offset:]:
# if current event name is identical to previous, then only update incident description.
if (module_name in ('endpoint-security', 'kill-chain')) and \
(incidents and extract_event_name(event, module_name) == incidents[-1].get('name')):
event_offset += 1
last_incident = incidents[-1]
alert_created_time = last_incident.get('occurred')
if module_name == 'endpoint-security':
alert_created_time = int(date_to_timestamp(alert_created_time,
date_format=DATE_FORMAT))
elif module_name == 'kill-chain':
alert_created_time = int(date_to_timestamp(alert_created_time,
date_format=CY_GENERAL_DATE_FORMAT))
step_num = event.get('Scenario_Counter') if module_name == 'endpoint-security' \
else event.get('Stage_Phase')
data = json.loads(last_incident.get('rawJSON'))
current_description = data.get('description')
new_description = f"\nStep {step_num}:\n{extract_event_description(event)}"
data['description'] = f'{current_description}{new_description}' if current_description \
else new_description
# Insert new description to the previous incident.
last_incident['rawJSON'] = json.dumps(data)
incidents[-1] = last_incident
# Keep track on the latest incident timestamp. Events that are part of an incident are
# returned without timestamp so we only use the first the event's timestamp (First step)
if alert_created_time > max_alert_created_time:
max_alert_created_time = alert_created_time
# The current event is new (has new name), then we need to build a new incident.
else:
if event_counter >= min(MAX_INCIDENTS_TO_FETCH,
int(demisto.params().get('max_fetch', MAX_INCIDENTS_TO_FETCH))):
break
# Incrementing the event offset, regardless of whether new incident will be created.
event_offset += 1
# If attack status is identical to previous assessment status, or the current attack was
# unsuccessful, we won't create incident.
if not event_status_changed(event):
continue
if timestamp_endpoint is None:
t_stamp = event.get('Timestamp') if event.get('Timestamp') else \
event.get('Attack_Timestamp')
else:
t_stamp = event.get(timestamp_endpoint)
# Validate API timestamp.
if validate_timestamp(t_stamp):
try:
alert_created_time = date_to_timestamp(t_stamp,
date_format=CY_GENERAL_DATE_FORMAT)
except Exception:
alert_created_time = date_to_timestamp(t_stamp,
date_format=CY_UNIQUE_DATE_FORMAT)
                # If current alert was created since last fetch time, create XSOAR incident.
if alert_created_time >= last_fetch:
incidents.append(build_incident_dict(event, module_name, t_stamp))
event_counter += 1
# Keep track on the latest incident timestamp.
if alert_created_time > max_alert_created_time:
max_alert_created_time = alert_created_time
return incidents, event_offset, max_alert_created_time, len(events)
|
3,836 |
def test_all_simple_paths_with_two_targets():
G = nx.path_graph(4)
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
G = nx.path_graph(4, create_using=nx.DiGraph())
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
|
def test_all_simple_paths_with_two_targets_emits_two_paths():
G = nx.path_graph(4)
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
G = nx.path_graph(4, create_using=nx.DiGraph())
G.add_edge(2, 4)
paths = nx.all_simple_paths(G, 0, [3, 4])
assert_equal(set(tuple(p) for p in paths), {(0, 1, 2, 3), (0, 1, 2, 4)})
|
2,359 |
def _estimators_has(attr):
"""Check if self.estimator or self.estimators_[0] has attr.
If `self.estimators_[0]` has the attr, then its safe to assume that other
values has it tool. This function is used together with `avaliable_if`.
"""
return lambda self: (
hasattr(self.estimator, attr)
or (hasattr(self, "estimators_") and hasattr(self.estimators_[0], attr))
)
|
def _estimators_has(attr):
"""Check if self.estimator or self.estimators_[0] has attr.
    If `self.estimators_[0]` has the attr, then it's safe to assume that the other
    values have it too. This function is used together with `available_if`.
"""
return lambda self: (
hasattr(self.estimator, attr)
or (hasattr(self, "estimators_") and hasattr(self.estimators_[0], attr))
)
|
33,912 |
def create_and_get_archive_from_remote_node(remote_node: Node,
parameters: GetParameters,
script_path: str = "ray"
) -> Optional[str]:
"""Create an archive containing logs on a remote node and transfer.
This will call ``ray local-dump --stream`` on the remote
node. The resulting file will be saved locally in a temporary file and
returned.
Args:
remote_node (Node): Remote node to gather archive from.
script_path (str): Path to this script on the remote node.
parameters (GetParameters): Parameters (settings) for getting data.
Returns:
Path to a temporary file containing the node's collected data.
"""
cmd = [
"ssh",
"-o StrictHostKeyChecking=no",
"-o UserKnownHostsFile=/dev/null",
"-o LogLevel=ERROR",
"-i",
remote_node.ssh_key,
f"{remote_node.ssh_user}@{remote_node.host}",
]
if remote_node.docker_container:
cmd += [
"docker",
"exec",
remote_node.docker_container,
]
collect_cmd = [script_path, "local-dump", "--stream"]
collect_cmd += ["--logs"] if parameters.logs else ["--no-logs"]
collect_cmd += ["--debug-state"] if parameters.debug_state else [
"--no-debug-state"
]
collect_cmd += ["--pip"] if parameters.pip else ["--no-pip"]
collect_cmd += ["--processes"] if parameters.processes else [
"--no-processes"
]
if parameters.processes:
collect_cmd += ["--processes-verbose"] \
if parameters.processes_verbose else ["--no-proccesses-verbose"]
cmd += ["/bin/bash", "-c", _wrap(collect_cmd, quotes="\"")]
cat = "node" if not remote_node.is_head else "head"
cli_logger.print(f"Collecting data from remote node: {remote_node.host}")
_, tmp = tempfile.mkstemp(
prefix=f"ray_{cat}_{remote_node.host}_", suffix=".tar.gz")[1]
with open(tmp, "wb") as fp:
try:
subprocess.check_call(cmd, stdout=fp, stderr=sys.stderr)
except subprocess.CalledProcessError as exc:
raise RemoteCommandFailed(
f"Gathering logs from remote node failed: {' '.join(cmd)}"
) from exc
return tmp
|
def create_and_get_archive_from_remote_node(remote_node: Node,
parameters: GetParameters,
script_path: str = "ray"
) -> Optional[str]:
"""Create an archive containing logs on a remote node and transfer.
This will call ``ray local-dump --stream`` on the remote
node. The resulting file will be saved locally in a temporary file and
returned.
Args:
remote_node (Node): Remote node to gather archive from.
script_path (str): Path to this script on the remote node.
parameters (GetParameters): Parameters (settings) for getting data.
Returns:
Path to a temporary file containing the node's collected data.
"""
cmd = [
"ssh",
"-o StrictHostKeyChecking=no",
"-o UserKnownHostsFile=/dev/null",
"-o LogLevel=ERROR",
"-i",
remote_node.ssh_key,
f"{remote_node.ssh_user}@{remote_node.host}",
]
if remote_node.docker_container:
cmd += [
"docker",
"exec",
remote_node.docker_container,
]
collect_cmd = [script_path, "local-dump", "--stream"]
collect_cmd += ["--logs"] if parameters.logs else ["--no-logs"]
collect_cmd += ["--debug-state"] if parameters.debug_state else [
"--no-debug-state"
]
collect_cmd += ["--pip"] if parameters.pip else ["--no-pip"]
collect_cmd += ["--processes"] if parameters.processes else [
"--no-processes"
]
if parameters.processes:
collect_cmd += ["--processes-verbose"] \
if parameters.processes_verbose else ["--no-proccesses-verbose"]
cmd += ["/bin/bash", "-c", _wrap(collect_cmd, quotes="\"")]
cat = "node" if not remote_node.is_head else "head"
cli_logger.print(f"Collecting data from remote node: {remote_node.host}")
tmp = tempfile.mkstemp(
prefix=f"ray_{cat}_{remote_node.host}_", suffix=".tar.gz")[1]
with open(tmp, "wb") as fp:
try:
subprocess.check_call(cmd, stdout=fp, stderr=sys.stderr)
except subprocess.CalledProcessError as exc:
raise RemoteCommandFailed(
f"Gathering logs from remote node failed: {' '.join(cmd)}"
) from exc
return tmp
|
54,324 |
def get_scheduler(params, optimizer, num_epochs=0):
"""Get scheduler.
Args:
params (dict): scheduler parameters, see `PyTorch documentation <https://pytorch.org/docs/stable/optim.html>`__
optimizer (torch optim):
num_epochs (int): number of epochs.
Returns:
torch.optim, bool, which indicates if the scheduler is updated for each batch (True), or for each epoch (False).
"""
step_scheduler_batch = False
scheduler_name = params["name"]
del params["name"]
if scheduler_name == "CosineAnnealingLR":
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
elif scheduler_name == "CosineAnnealingWarmRestarts":
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, **params)
elif scheduler_name == "CyclicLR":
scheduler = optim.lr_scheduler.CyclicLR(optimizer, **params, mode="triangular2", cycle_momentum=False)
step_scheduler_batch = True
else:
raise ValueError(
"Unknown LR Scheduler name, please choose between 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts',"
"or 'CyclicLR'")
return scheduler, step_scheduler_batch
|
def get_scheduler(params, optimizer, num_epochs=0):
"""Get scheduler.
Args:
params (dict): scheduler parameters, see `PyTorch documentation <https://pytorch.org/docs/stable/optim.html>`__
optimizer (torch optim):
num_epochs (int): number of epochs.
Returns:
torch.optim, bool, which indicates if the scheduler is updated for each batch (True), or for each epoch (False).
"""
step_scheduler_batch = False
scheduler_name = params["name"]
del params["name"]
if scheduler_name == "CosineAnnealingLR":
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
elif scheduler_name == "CosineAnnealingWarmRestarts":
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, **params)
elif scheduler_name == "CyclicLR":
scheduler = optim.lr_scheduler.CyclicLR(optimizer, **params, mode="triangular2", cycle_momentum=False)
step_scheduler_batch = True
else:
raise ValueError(
"Unknown LR Scheduler name, please choose between 'CosineAnnealingLR', 'CosineAnnealingWarmRestarts',"
" or 'CyclicLR'")
return scheduler, step_scheduler_batch
|
44,175 |
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="will be deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
|
def catch_warn_ExpvalCost(ansatz, hamiltonian, device, **kwargs):
"""Computes the ExpvalCost and catches the initial deprecation warning."""
with pytest.warns(UserWarning, match="is deprecated,"):
res = qml.ExpvalCost(ansatz, hamiltonian, device, **kwargs)
return res
|
20,376 |
def save(obj, filename, fmt='auto', backend=None, resources='cdn', toolbar=None, **kwargs):
"""
Saves the supplied object to file.
The available output formats depend on the backend being used. By
default and if the filename is a string the output format will be
inferred from the file extension. Otherwise an explicit format
will need to be specified. For ambiguous file extensions such as
html it may be necessary to specify an explicit fmt to override
the default, e.g. in the case of 'html' output the widgets will
default to fmt='widgets', which may be changed to scrubber widgets
using fmt='scrubber'.
Arguments
---------
obj: HoloViews object
The HoloViews object to save to file
filename: string or IO object
The filename or BytesIO/StringIO object to save to
fmt: string
The format to save the object as, e.g. png, svg, html, or gif
and if widgets are desired either 'widgets' or 'scrubber'
backend: string
A valid HoloViews rendering backend, e.g. bokeh or matplotlib
resources: string or bokeh.resource.Resources
Bokeh resources used to load bokehJS components. Defaults to
CDN, to embed resources inline for offline usage use 'inline'
or bokeh.resources.INLINE.
**kwargs: dict
Additional keyword arguments passed to the renderer,
e.g. fps for animations
"""
if toolbar is None:
obj.options(toolbar=None)
backend = backend or Store.current_backend
renderer_obj = renderer(backend)
if kwargs:
renderer_obj = renderer_obj.instance(**kwargs)
if Path is not None and isinstance(filename, Path):
filename = str(filename.absolute())
if isinstance(filename, basestring):
supported = [mfmt for tformats in renderer_obj.mode_formats.values()
for mfmt in tformats]
formats = filename.split('.')
if fmt == 'auto' and formats and formats[-1] != 'html':
fmt = formats[-1]
if formats[-1] in supported:
filename = '.'.join(formats[:-1])
return renderer_obj.save(obj, filename, fmt=fmt, resources=resources)
|
def save(obj, filename, fmt='auto', backend=None, resources='cdn', toolbar=None, **kwargs):
"""
Saves the supplied object to file.
The available output formats depend on the backend being used. By
default and if the filename is a string the output format will be
inferred from the file extension. Otherwise an explicit format
will need to be specified. For ambiguous file extensions such as
html it may be necessary to specify an explicit fmt to override
the default, e.g. in the case of 'html' output the widgets will
default to fmt='widgets', which may be changed to scrubber widgets
using fmt='scrubber'.
Arguments
---------
obj: HoloViews object
The HoloViews object to save to file
filename: string or IO object
The filename or BytesIO/StringIO object to save to
fmt: string
The format to save the object as, e.g. png, svg, html, or gif
and if widgets are desired either 'widgets' or 'scrubber'
backend: string
A valid HoloViews rendering backend, e.g. bokeh or matplotlib
resources: string or bokeh.resource.Resources
Bokeh resources used to load bokehJS components. Defaults to
CDN, to embed resources inline for offline usage use 'inline'
or bokeh.resources.INLINE.
**kwargs: dict
Additional keyword arguments passed to the renderer,
e.g. fps for animations
"""
if (backend == 'bokeh' or (backend is None and Store.current_backend == 'bokeh')) and toolbar is None:
obj.options(toolbar=None)
backend = backend or Store.current_backend
renderer_obj = renderer(backend)
if kwargs:
renderer_obj = renderer_obj.instance(**kwargs)
if Path is not None and isinstance(filename, Path):
filename = str(filename.absolute())
if isinstance(filename, basestring):
supported = [mfmt for tformats in renderer_obj.mode_formats.values()
for mfmt in tformats]
formats = filename.split('.')
if fmt == 'auto' and formats and formats[-1] != 'html':
fmt = formats[-1]
if formats[-1] in supported:
filename = '.'.join(formats[:-1])
return renderer_obj.save(obj, filename, fmt=fmt, resources=resources)
|
45,171 |
def Completed(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Completed` states.
Returns:
State: a Completed state
"""
return schemas.states.Completed(cls=cls, **kwargs)
|
def Completed(cls: Type[State] = State, **kwargs) -> State:
"""Convenience function for creating `Completed` states.
Returns:
State: a `Completed` state
"""
return schemas.states.Completed(cls=cls, **kwargs)
|
33,132 |
def rk4(f, x, t, dt, stages=4, s=0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
schemefor SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
additive noise as defined in the below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method.
When stages=1, this becomes the Euler (-Maruyama) scheme.
Default: 4.
s : float
The diffusion coeffient (std. dev) for models with additive noise.
Default: 0, yielding deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N_e, N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal([N_e, N_x])
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
def rk4(f, x, t, dt, stages=4, s=0):
"""Runge-Kutta (explicit, non-adaptive) numerical (S)ODE solvers.
The rule has strong / weak convergence order 1.0 for generic SDEs and order 4.0
convergence for ODEs when stages=4. For stages=1, this becomes the Euler-Maruyama
schemefor SDEs (s > 0.0) with strong / weak convergence order 1.0 for SDEs with
additive noise as defined in the below. See `bib.grudzien2020numerical`.
Parameters
----------
f : function
The time derivative of the dynamical system. Must be of the form `f(t, x)`
x : ndarray or float
State vector of the forcing term
t : float
Starting time of the integration
dt : float
Integration time step.
stages : int, optional
The number of stages of the RK method.
When stages=1, this becomes the Euler (-Maruyama) scheme.
Default: 4.
s : float
The diffusion coeffient (std. dev) for models with additive noise.
Default: 0, yielding deterministic integration.
Returns
-------
ndarray
State vector at the new time, `t+dt`
"""
if s > 0:
# non-trivial diffusion, this defines the SDE integration with additive noise
# generate perturbation for Brownian motion
dims = np.shape(x)
if len(dims) > 1:
N, Nx = dims
W = np.sqrt(dt) * np.random.standard_normal([N_e, N_x])
else:
N_x , = dims
W = np.sqrt(dt) * np.random.standard_normal(N_x)
if stages >=1: k1 = dt * f(t , x) + s * W # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) + s * W # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) + s * W # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) + s * W # noqa
k4 = dt * f(t+dt , x+k3) + s * W # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
else:
# deterministic integration
if stages >=1: k1 = dt * f(t , x) # noqa
if stages >=2: k2 = dt * f(t+dt/2.0, x+k1/2.0) # noqa
if stages ==3: k3 = dt * f(t+dt , x+k2*2.0-k1) # noqa
if stages ==4: # noqa
k3 = dt * f(t+dt/2.0, x+k2/2.0) # noqa
k4 = dt * f(t+dt , x+k3) # noqa
if stages ==1: return x + k1 # noqa
elif stages ==2: return x + k2 # noqa
elif stages ==3: return x + (k1 + 4.0*k2 + k3)/6.0 # noqa
elif stages ==4: return x + (k1 + 2.0*(k2 + k3) + k4)/6.0 # noqa
else: raise NotImplementedError # noqa
# fmt: on
|
59,616 |
def _is_valid_resolution(resolution):
"""
Check if a resolution is valid for the global Earth relief grid.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Raises
------
GMTInvalidInput
If given resolution is not valid.
Examples
--------
>>> _is_valid_resolution("01d")
>>> _is_valid_resolution("60m")
>>> _is_valid_resolution("5m")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '5m'.
>>> _is_valid_resolution("15s")
>>> _is_valid_resolution("01s")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '01s'.
"""
valid_resolutions = ["01d"]
valid_resolutions.extend(
["{:02d}m".format(res) for res in [60, 30, 20, 15, 10, 6, 5, 4, 3, 2, 1]]
)
valid_resolutions.extend(["{:02d}s".format(res) for res in [30, 15]])
if resolution not in valid_resolutions:
raise GMTInvalidInput(
"Invalid Earth relief resolution '{}'.".format(resolution)
)
|
def _is_valid_resolution(resolution):
"""
Check if a resolution is valid for the global Earth relief grid.
Parameters
----------
resolution : str
Same as the input for load_earth_relief
Raises
------
GMTInvalidInput
If given resolution is not valid.
Examples
--------
>>> _is_valid_resolution("01d")
>>> _is_valid_resolution("60m")
>>> _is_valid_resolution("5m")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '5m'.
>>> _is_valid_resolution("15s")
>>> _is_valid_resolution("01s")
Traceback (most recent call last):
...
pygmt.exceptions.GMTInvalidInput: Invalid Earth relief resolution '01s'.
"""
valid_resolutions = ["01d"]
valid_resolutions.extend(
[f"{res:02d}m" for res in [60, 30, 20, 15, 10, 6, 5, 4, 3, 2, 1]]
)
valid_resolutions.extend([f"{res:02d}s" for res in [30, 15]])
if resolution not in valid_resolutions:
raise GMTInvalidInput(
"Invalid Earth relief resolution '{}'.".format(resolution)
)
|
57,755 |
def main() -> None:
params = demisto.params()
args = demisto.args()
hostname = params.get('hostname')
api_id = params.get('api_id')
api_key = params.get('api_key')
handle_proxy()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
commands = {
'cloudshare-get-envs': get_envs_command,
'cloudshare-get-projects': get_projects_command,
'cloudshare-get-project': get_project_command,
'cloudshare-get-project-policies': get_project_policies_command,
'cloudshare-get-project-blueprints': get_project_blueprints_command,
'cloudshare-get-project-blueprint': get_project_blueprint_command,
'cloudshare-get-classes': get_classes_command,
'cloudshare-get-class': get_class_command,
'cloudshare-delete-class': delete_class_command,
'cloudshare-delete-class-environemtns': delete_class_environments_command,
'cloudshare-delete-class-environemnts': delete_class_environments_command,
'cloudshare-get-classes-countries': get_classes_countries_command,
'cloudshare-get-classes-customfields': get_classes_customfields_command,
'cloudshare-get-classes-detailed': get_classes_detailed_command,
'cloudshare-get-classes-instructors': get_classes_instructors_command,
'cloudshare-create-class': create_class_command,
'cloudshare-send-class-invitations': send_class_invitations_command,
'cloudshare-suspend-class-environments': suspend_class_environments_command,
'cloudshare-modify-class': modify_class_command,
'cloudshare-get-students': get_students_command,
'cloudshare-get-student': delete_student_command,
'cloudshare-register-student': register_student_command,
'cloudshare-modify-student': modify_student_command,
'cloudshare-get-regions': get_regions_command,
'cloudshare-get-timezones': get_timezones_command,
'cloudshare-get-env-resource': get_env_resources_command,
'cloudshare-get-env-extended': get_env_extended_command,
'cloudshare-get-env-extended-vanity': get_env_extended_vanity_command,
'cloudshare-get-env-extended-token': get_env_extended_token_command,
'cloudshare-get-env-multiple-resources': get_env_multiple_resources_command,
'cloudshare-extend-env': extend_env_command,
'cloudshare-postpone-env-suspend': postpone_env_suspend_command,
'cloudshare-resume-env': resume_env_command,
'cloudshare-revert-env': revert_env_command,
'cloudshare-suspend-env': suspend_env_command,
'cloudshare-get-env': get_env_command,
'cloudshare-delete-env': delete_env_command,
'cloudshare-create-env': create_env_command,
'cloudshare-modify-env': modify_env_command,
'cloudshare-delete-vm': delete_vm_command,
'cloudshare-check-vm-execution-status': vm_check_execution_status_command,
'cloudshare-get-vm-remote-access-file': vm_get_remote_command,
'cloudshare-execute-vm-command': vm_execute_command,
'cloudshare-modify-vm-hardware': vm_modify_hardware_command,
'cloudshare-reboot-vm': reboot_vm_command,
'cloudshare-revert-vm': revert_vm_command,
'cloudshare-get-cloud-folders': get_cloud_folders_command,
'cloudshare-get-env-cloud-folders': get_env_cloud_folders_command,
'cloudshare-generate-cloud-folder-password': generate_password_folder_command,
'cloudshare-unmount-env-folders': unmount_env_folders_command,
'cloudshare-get-templates': get_templates_command,
'cloudshare-get-snapshot': get_snapshot_command,
'cloudshare-get-env-snapshots': get_env_snapshots_command,
'cloudshare-mark-default-snapshot': mark_default_snapshot_command,
'cloudshare-take-snapshot-env': take_snapshot_env_command,
'cloudshare-get-teams': get_teams_command,
'cloudshare-invite-user-poc': invite_user_poc_command,
'cloudshare-get-poc-invitations': get_poc_invitations_command
}
client = Client(
hostname,
api_id=api_id,
api_key=api_key
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
test_module_command(client, args)
else:
commands[command](client, args)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
params = demisto.params()
args = demisto.args()
hostname = params.get('hostname')
api_id = params.get('api_id')
api_key = params.get('api_key')
handle_proxy()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
commands = {
'cloudshare-get-envs': get_envs_command,
'cloudshare-get-projects': get_projects_command,
'cloudshare-get-project': get_project_command,
'cloudshare-get-project-policies': get_project_policies_command,
'cloudshare-get-project-blueprints': get_project_blueprints_command,
'cloudshare-get-project-blueprint': get_project_blueprint_command,
'cloudshare-get-classes': get_classes_command,
'cloudshare-get-class': get_class_command,
'cloudshare-delete-class': delete_class_command,
'cloudshare-delete-class-environemtns': delete_class_environments_command, # This is here for maintaining BC
'cloudshare-delete-class-environemnts': delete_class_environments_command,
'cloudshare-get-classes-countries': get_classes_countries_command,
'cloudshare-get-classes-customfields': get_classes_customfields_command,
'cloudshare-get-classes-detailed': get_classes_detailed_command,
'cloudshare-get-classes-instructors': get_classes_instructors_command,
'cloudshare-create-class': create_class_command,
'cloudshare-send-class-invitations': send_class_invitations_command,
'cloudshare-suspend-class-environments': suspend_class_environments_command,
'cloudshare-modify-class': modify_class_command,
'cloudshare-get-students': get_students_command,
'cloudshare-get-student': delete_student_command,
'cloudshare-register-student': register_student_command,
'cloudshare-modify-student': modify_student_command,
'cloudshare-get-regions': get_regions_command,
'cloudshare-get-timezones': get_timezones_command,
'cloudshare-get-env-resource': get_env_resources_command,
'cloudshare-get-env-extended': get_env_extended_command,
'cloudshare-get-env-extended-vanity': get_env_extended_vanity_command,
'cloudshare-get-env-extended-token': get_env_extended_token_command,
'cloudshare-get-env-multiple-resources': get_env_multiple_resources_command,
'cloudshare-extend-env': extend_env_command,
'cloudshare-postpone-env-suspend': postpone_env_suspend_command,
'cloudshare-resume-env': resume_env_command,
'cloudshare-revert-env': revert_env_command,
'cloudshare-suspend-env': suspend_env_command,
'cloudshare-get-env': get_env_command,
'cloudshare-delete-env': delete_env_command,
'cloudshare-create-env': create_env_command,
'cloudshare-modify-env': modify_env_command,
'cloudshare-delete-vm': delete_vm_command,
'cloudshare-check-vm-execution-status': vm_check_execution_status_command,
'cloudshare-get-vm-remote-access-file': vm_get_remote_command,
'cloudshare-execute-vm-command': vm_execute_command,
'cloudshare-modify-vm-hardware': vm_modify_hardware_command,
'cloudshare-reboot-vm': reboot_vm_command,
'cloudshare-revert-vm': revert_vm_command,
'cloudshare-get-cloud-folders': get_cloud_folders_command,
'cloudshare-get-env-cloud-folders': get_env_cloud_folders_command,
'cloudshare-generate-cloud-folder-password': generate_password_folder_command,
'cloudshare-unmount-env-folders': unmount_env_folders_command,
'cloudshare-get-templates': get_templates_command,
'cloudshare-get-snapshot': get_snapshot_command,
'cloudshare-get-env-snapshots': get_env_snapshots_command,
'cloudshare-mark-default-snapshot': mark_default_snapshot_command,
'cloudshare-take-snapshot-env': take_snapshot_env_command,
'cloudshare-get-teams': get_teams_command,
'cloudshare-invite-user-poc': invite_user_poc_command,
'cloudshare-get-poc-invitations': get_poc_invitations_command
}
client = Client(
hostname,
api_id=api_id,
api_key=api_key
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
test_module_command(client, args)
else:
commands[command](client, args)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
55,670 |
def equalize(img):
"""Equalize the image histogram. This function applies a non-linear mapping
to the input image, in order to create a uniform distribution of grayscale
values in the output image.
Args:
img (ndarray): Image to be equalized.
Returns:
ndarray: The equalized image.
"""
def _scale_channel(im, c):
"""Scale the data in the corresponding channel."""
im = im[:, :, c]
# Compute the histogram of the image channel.
histo = np.histogram(im, 256, (0, 255))[0]
# For computing the step, filter out the nonzeros.
nonzero_histo = histo[histo > 0]
step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
def _build_lut(histo, step, eps=1e-5):
# Compute the cumulative sum, shifted by step // 2
# and then normalized by step.
if step:
lut = (np.cumsum(histo) + (step // 2)) // step
else:
lut = (np.cumsum(histo) + (step // 2)) // (step + eps)
# Shift lut, prepending with 0.
lut = np.concatenate([[0], lut[:-1]], 0)
# Clip the counts to be in range.
return np.clip(lut, 0, 255)
# If step is zero, return the original image. Otherwise,
# build lut from the full histogram and step, and then
# index from it.
result = np.where(np.equal(step, 0), im, _build_lut(histo, step)[im])
return result
# Scales each channel independently and then stacks
# the result.
s1 = _scale_channel(img, 0)
s2 = _scale_channel(img, 1)
s3 = _scale_channel(img, 2)
equalized_img = np.stack([s1, s2, s3], axis=-1)
return equalized_img
|
def equalize(img):
"""Equalize the image histogram.
This function applies a non-linear mapping
to the input image, in order to create a uniform distribution of grayscale
values in the output image.
Args:
img (ndarray): Image to be equalized.
Returns:
ndarray: The equalized image.
"""
def _scale_channel(im, c):
"""Scale the data in the corresponding channel."""
im = im[:, :, c]
# Compute the histogram of the image channel.
histo = np.histogram(im, 256, (0, 255))[0]
# For computing the step, filter out the nonzeros.
nonzero_histo = histo[histo > 0]
step = (np.sum(nonzero_histo) - nonzero_histo[-1]) // 255
def _build_lut(histo, step, eps=1e-5):
# Compute the cumulative sum, shifted by step // 2
# and then normalized by step.
if step:
lut = (np.cumsum(histo) + (step // 2)) // step
else:
lut = (np.cumsum(histo) + (step // 2)) // (step + eps)
# Shift lut, prepending with 0.
lut = np.concatenate([[0], lut[:-1]], 0)
# Clip the counts to be in range.
return np.clip(lut, 0, 255)
# If step is zero, return the original image. Otherwise,
# build lut from the full histogram and step, and then
# index from it.
result = np.where(np.equal(step, 0), im, _build_lut(histo, step)[im])
return result
# Scales each channel independently and then stacks
# the result.
s1 = _scale_channel(img, 0)
s2 = _scale_channel(img, 1)
s3 = _scale_channel(img, 2)
equalized_img = np.stack([s1, s2, s3], axis=-1)
return equalized_img
|
25,124 |
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
"""Open group using metadata previously consolidated into a single key.
This is an optimised method for opening a Zarr group, where instead of
traversing the group/array hierarchy by accessing the metadata keys at
each level, a single key contains all of the metadata for everything.
For remote data sources where the overhead of accessing a key is large
compared to the time to read data.
The group accessed must have already had its metadata consolidated into a
single key using the function :func:`consolidate_metadata`.
This optimised method only works in modes which do not change the
metadata, although the data may still be written/updated.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to read the consolidated metadata from. The default (.zmetadata)
corresponds to the default used by :func:`consolidate_metadata`.
mode : {'r', 'r+'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist) although only writes to data are allowed,
changes to metadata including creation of new arrays or group
are not allowed.
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the consolidated metadata.
See Also
--------
consolidate_metadata
"""
from .storage import ConsolidatedMetadataStore
# normalize parameters
store = normalize_store_arg(store)
if mode not in {'r', 'r+'}:
raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
.format(mode))
# setup metadata sotre
meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)
# pass through
chunk_store = kwargs.pop('chunk_store', None)
if chunk_store is None:
chunk_store = store
return open(store=meta_store, chunk_store=chunk_store, mode=mode, **kwargs)
|
def open_consolidated(store, metadata_key='.zmetadata', mode='r+', **kwargs):
"""Open group using metadata previously consolidated into a single key.
This is an optimised method for opening a Zarr group, where instead of
traversing the group/array hierarchy by accessing the metadata keys at
each level, a single key contains all of the metadata for everything.
For remote data sources where the overhead of accessing a key is large
compared to the time to read data.
The group accessed must have already had its metadata consolidated into a
single key using the function :func:`consolidate_metadata`.
This optimised method only works in modes which do not change the
metadata, although the data may still be written/updated.
Parameters
----------
store : MutableMapping or string
Store or path to directory in file system or name of zip file.
metadata_key : str
Key to read the consolidated metadata from. The default (.zmetadata)
corresponds to the default used by :func:`consolidate_metadata`.
mode : {'r', 'r+'}, optional
Persistence mode: 'r' means read only (must exist); 'r+' means
read/write (must exist) although only writes to data are allowed,
changes to metadata including creation of new arrays or group
are not allowed.
**kwargs
Additional parameters are passed through to :func:`zarr.creation.open_array` or
:func:`zarr.hierarchy.open_group`.
Returns
-------
g : :class:`zarr.hierarchy.Group`
Group instance, opened with the consolidated metadata.
See Also
--------
consolidate_metadata
"""
from .storage import ConsolidatedMetadataStore
# normalize parameters
store = normalize_store_arg(store)
if mode not in {'r', 'r+'}:
raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}"
.format(mode))
# setup metadata sotre
meta_store = ConsolidatedMetadataStore(store, metadata_key=metadata_key)
# pass through
chunk_store = kwargs.pop('chunk_store', store)
return open(store=meta_store, chunk_store=chunk_store, mode=mode, **kwargs)
|
31,620 |
def main():
params = demisto.params()
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(server_url=params.get('server_url'),
use_ssl=not params.get('insecure', False),
proxy=params.get('proxy'),
feed_tags=argToList(params.get('feedTags')),
tlp_color=params.get('tlp_color'),
content_max_size=int(params.get('max_size', '45')))
client.create_indicators_from_response()
if demisto.command() == 'test-module':
# if the client was created successfully and there is data in feed the test is successful.
return_results("ok")
elif demisto.command() == 'rss-get-indicators':
return_results(get_indicators(client, demisto.args()))
elif demisto.command() == 'fetch-indicators':
for iter_ in batch(client.parsed_indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except ValueError:
raise DemistoException("Article content max size must be a number, for example 50.")
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f"Failed to execute {command} command.\nError:\n{str(err)}")
|
def main():
params = demisto.params()
command = demisto.command()
demisto.info(f'Command being called is {command}')
try:
client = Client(server_url=params.get('server_url'),
use_ssl=not params.get('insecure', False),
proxy=params.get('proxy'),
feed_tags=argToList(params.get('feedTags')),
tlp_color=params.get('tlp_color'),
content_max_size=int(params.get('max_size', '45')))
client.create_indicators_from_response()
if demisto.command() == 'test-module':
# if the client was created successfully and there is data in feed the test is successful.
return_results("ok")
elif command == 'rss-get-indicators':
return_results(get_indicators(client, demisto.args()))
elif demisto.command() == 'fetch-indicators':
for iter_ in batch(client.parsed_indicators, batch_size=2000):
demisto.createIndicators(iter_)
else:
raise NotImplementedError(f'Command {command} is not implemented.')
# Log exceptions and return errors
except ValueError:
raise DemistoException("Article content max size must be a number, for example 50.")
except Exception as err:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f"Failed to execute {command} command.\nError:\n{str(err)}")
|
33,664 |
def Concurrently(ops: List[LocalIterator],
*,
mode="round_robin",
output_indexes=None):
"""Operator that runs the given parent iterators concurrently.
Arguments:
mode (str): One of {'round_robin', 'async'}.
- In 'round_robin' mode, we alternate between pulling items from
each parent iterator in order deterministically.
- In 'async' mode, we pull from each parent iterator as fast as
they are produced. This is non-deterministic.
output_indexes (list): If specified, only output results from the
given ops. For example, if output_indexes=[0], only results from
the first op in ops will be returned.
>>> sim_op = ParallelRollouts(...).for_each(...)
>>> replay_op = LocalReplay(...).for_each(...)
>>> combined_op = Concurrently([sim_op, replay_op], mode="async")
"""
if len(ops) < 2:
raise ValueError("Should specify at least 2 ops.")
if mode == "round_robin":
deterministic = True
elif mode == "async":
deterministic = False
else:
raise ValueError("Unknown mode {}".format(mode))
if output_indexes:
for i in output_indexes:
assert i in range(len(ops)), ("Index out of range", i)
def tag(op, i):
return op.for_each(lambda x: (i, x))
ops = [tag(op, i) for i, op in enumerate(ops)]
output = ops[0].union(*ops[1:], deterministic=deterministic)
if output_indexes:
output = (output.filter(lambda tup: tup[0] in output_indexes)
.for_each(lambda tup: tup[1]))
return output
|
def Concurrently(ops: List[LocalIterator],
*,
mode="round_robin",
output_indexes=None):
"""Operator that runs the given parent iterators concurrently.
Arguments:
mode (str): One of {'round_robin', 'async'}.
- In 'round_robin' mode, we alternate between pulling items from
each parent iterator in order deterministically.
- In 'async' mode, we pull from each parent iterator as fast as
they are produced. This is non-deterministic.
output_indexes (List[int]): If specified, only output results from the
given ops. For example, if output_indexes=[0], only results from
the first op in ops will be returned.
>>> sim_op = ParallelRollouts(...).for_each(...)
>>> replay_op = LocalReplay(...).for_each(...)
>>> combined_op = Concurrently([sim_op, replay_op], mode="async")
"""
if len(ops) < 2:
raise ValueError("Should specify at least 2 ops.")
if mode == "round_robin":
deterministic = True
elif mode == "async":
deterministic = False
else:
raise ValueError("Unknown mode {}".format(mode))
if output_indexes:
for i in output_indexes:
assert i in range(len(ops)), ("Index out of range", i)
def tag(op, i):
return op.for_each(lambda x: (i, x))
ops = [tag(op, i) for i, op in enumerate(ops)]
output = ops[0].union(*ops[1:], deterministic=deterministic)
if output_indexes:
output = (output.filter(lambda tup: tup[0] in output_indexes)
.for_each(lambda tup: tup[1]))
return output
|
29,762 |
def get_user_recently_played(user):
""" Get tracks from the current user’s recently played tracks.
"""
latest_listened_at_ts = int(user.latest_listened_at.timestamp() * 1000) # latest listen UNIX ts in ms
return user.get_spotipy_client().current_user_recently_played(limit=50, after=latest_listened_at_ts)
|
def get_user_recently_played(user):
""" Get tracks from the current user’s recently played tracks.
"""
latest_listened_at_ts = int(user.latest_listened_at.timestamp() * 1000) # latest listen UNIX ts in ms
return make_api_request(user.get_spotipy_client().current_user_recently_played, limit=50, after=latest_listened_at_ts)
|
55,110 |
def dot(tensor1, tensor2):
"""Returns the matrix or dot product of two tensors.
* If both tensors are 0-dimensional, elementwise multiplication
is performed and a 0-dimensional scalar returned.
* If both tensors are 1-dimensional, the dot product is returned.
* If the first array is 2-dimensional and the second array 1-dimensional,
the matrix-vector product is returned.
* If both tensors are 2-dimensional, the matrix product is returned.
* Finally, if the the first array is N-dimensional and the second array
M-dimensional, a sum product over the last dimension of the first array,
and the second-to-last dimension of the second array is returned.
Args:
tensor1 (tensor_like): input tensor
tensor2 (tensor_like): input tensor
Returns:
tensor_like: the matrix or dot product of two tensors
"""
interface = _multi_dispatch([tensor1, tensor2])
x, y = np.coerce([tensor1, tensor2], like=interface)
if interface == "torch":
if x.ndim == 0 and y.ndim == 0:
return x * y
if x.ndim <= 2 and y.ndim <= 2:
return x @ y
return np.tensordot(x, y, dims=[[-1], [-2]], like=interface)
if interface == "tensorflow":
if x.ndim == 0 and y.ndim == 0:
return x * y
if y.ndim == 1:
return np.tensordot(x, y, axes=[[-1], [0]], like=interface)
if x.ndim == 2 and y.ndim == 2:
return x @ y
return np.tensordot(x, y, axes=[[-1], [-2]], like=interface)
return np.dot(x, y, like=interface)
|
def dot(tensor1, tensor2):
"""Returns the matrix or dot product of two tensors.
* If both tensors are 0-dimensional, elementwise multiplication
is performed and a 0-dimensional scalar returned.
* If both tensors are 1-dimensional, the dot product is returned.
* If the first array is 2-dimensional and the second array 1-dimensional,
the matrix-vector product is returned.
* If both tensors are 2-dimensional, the matrix product is returned.
* Finally, if the the first array is N-dimensional and the second array
M-dimensional, a sum product over the last dimension of the first array,
and the second-to-last dimension of the second array is returned.
Returns:
tensor_like[float]: The tensor with the value added at the given indices.
Args:
tensor1 (tensor_like): input tensor
tensor2 (tensor_like): input tensor
Returns:
tensor_like: the matrix or dot product of two tensors
"""
interface = _multi_dispatch([tensor1, tensor2])
x, y = np.coerce([tensor1, tensor2], like=interface)
if interface == "torch":
if x.ndim == 0 and y.ndim == 0:
return x * y
if x.ndim <= 2 and y.ndim <= 2:
return x @ y
return np.tensordot(x, y, dims=[[-1], [-2]], like=interface)
if interface == "tensorflow":
if x.ndim == 0 and y.ndim == 0:
return x * y
if y.ndim == 1:
return np.tensordot(x, y, axes=[[-1], [0]], like=interface)
if x.ndim == 2 and y.ndim == 2:
return x @ y
return np.tensordot(x, y, axes=[[-1], [-2]], like=interface)
return np.dot(x, y, like=interface)
|
43,901 |
def generate_basis_set(l, alpha, coeff, r):
r"""Generate a set of basis function objects.
Args:
l list((tuple[int])): angular momentum numbers of the basis function.
alpha list((array(float))): exponents of the Gaussian functions forming basis functions
coeff list((array(float))): coefficients of the contracted Gaussian functions
r list((array(float))): positions of the Gaussian functions forming the basis functions
Returns:
list(BasisFunction): list containing a set of basis function objects.
**Example**
>>> l = [(0, 0, 0), (0, 0, 0)]
>>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]]
>>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]]
>>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]]
>>> basis_set = generate_basis_set(l, exponents, coefficients, centers)
>>> print(basis_set)
[<molecule.BasisFunction object at 0x7f7566db2910>, <molecule.BasisFunction object at 0x7f7566db2a30>]
"""
return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))]
|
def generate_basis_set(l, alpha, coeff, r):
r"""Generate a set of basis function objects.
Args:
l list((tuple[int])): angular momentum numbers of the basis function.
alpha list((array(float))): exponents of the Gaussian functions forming basis functions
coeff list((array(float))): coefficients of the contracted Gaussian functions
r list((array(float))): positions of the Gaussian functions forming the basis functions
Returns:
list[BasisFunction]: list containing a set of basis function objects
**Example**
>>> l = [(0, 0, 0), (0, 0, 0)]
>>> exponents = [[3.42525091, 0.62391373, 0.1688554], [3.42525091, 0.62391373, 0.1688554]]
>>> coefficients = [[0.15432897, 0.53532814, 0.44463454], [0.15432897, 0.53532814, 0.44463454]]
>>> centers = [[0.0, 0.0, -0.694349], [0.0, 0.0, 0.694349]]
>>> basis_set = generate_basis_set(l, exponents, coefficients, centers)
>>> print(basis_set)
[<molecule.BasisFunction object at 0x7f7566db2910>, <molecule.BasisFunction object at 0x7f7566db2a30>]
"""
return [BasisFunction(l[i], alpha[i], coeff[i], r[i]) for i in range(len(l))]
|
45,963 |
def pyrdown(input: torch.Tensor, border_type: str = 'reflect', align_corners: bool = False, factor: float = 2.0) -> torch.Tensor:
r"""Blur a tensor and downsamples it.
.. image:: _static/img/pyrdown.png
Args:
input: the tensor to be downsampled.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
align_corners: interpolation flag.
factor: downsampling factor
Return:
the downsampled tensor.
Examples:
>>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
>>> pyrdown(input, align_corners=True)
tensor([[[[ 3.7500, 5.2500],
[ 9.7500, 11.2500]]]])
"""
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
kernel: torch.Tensor = _get_pyramid_gaussian_kernel()
_, _, height, width = input.shape
# blur image
x_blur: torch.Tensor = filter2d(input, kernel, border_type)
# TODO: use kornia.geometry.resize/rescale
# downsample.
out: torch.Tensor = F.interpolate(
x_blur,
size=(int(float(height) / factor), int(float(width) // factor)),
mode='bilinear',
align_corners=align_corners
)
return out
|
def pyrdown(input: torch.Tensor, border_type: str = 'reflect', align_corners: bool = False, factor: float = 2.0) -> torch.Tensor:
r"""Blur a tensor and downsamples it.
.. image:: _static/img/pyrdown.png
Args:
input: the tensor to be downsampled.
border_type: the padding mode to be applied before convolving.
The expected modes are: ``'constant'``, ``'reflect'``,
``'replicate'`` or ``'circular'``.
align_corners: interpolation flag.
factor: the downsampling factor.
Return:
the downsampled tensor.
Examples:
>>> input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
>>> pyrdown(input, align_corners=True)
tensor([[[[ 3.7500, 5.2500],
[ 9.7500, 11.2500]]]])
"""
if not len(input.shape) == 4:
raise ValueError(f"Invalid input shape, we expect BxCxHxW. Got: {input.shape}")
kernel: torch.Tensor = _get_pyramid_gaussian_kernel()
_, _, height, width = input.shape
# blur image
x_blur: torch.Tensor = filter2d(input, kernel, border_type)
# TODO: use kornia.geometry.resize/rescale
# downsample.
out: torch.Tensor = F.interpolate(
x_blur,
size=(int(float(height) / factor), int(float(width) // factor)),
mode='bilinear',
align_corners=align_corners
)
return out
|
32,153 |
def pagination(incidents_list: List, limit: int, page: int):
"""
:param incidents_list: The incidents_list from the API.
:param limit: Maximum number of objects to retrieve.
:param page: Page number
Returns:
Return a list of objects from the response according to the page and limit per page.
"""
limit = MAX_PAGE_SIZE if limit > MAX_PAGE_SIZE else limit
start = (page - 1) * limit
end = page * limit
return incidents_list[start:end]
|
def pagination(incidents_list: List, limit: int, page: int):
"""
:param incidents_list: The incidents_list from the API.
:param limit: Maximum number of objects to retrieve.
:param page: Page number
Returns:
Return a list of objects from the response according to the page and limit per page.
"""
limit = MAX_PAGE_SIZE if limit > MAX_PAGE_SIZE else limit
start = (page - 1) * limit
end = start + limit
return incidents_list[start:end]
|
54,878 |
def fused_rms_norm(input, normalized_shape, eps=1e-6):
# args = _cast_if_autocast_enabled(input, normalized_shape, eps)
args = _cast_if_autocast_enabled(input, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormFunction.apply(*args)
|
def fused_rms_norm(input, normalized_shape, eps=1e-6):
args = _cast_if_autocast_enabled(input, normalized_shape, eps)
with torch.cuda.amp.autocast(enabled=False):
return FusedRMSNormFunction.apply(*args)
|
46,662 |
def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
if not addr:
raise InvalidAddrError("addr should not be None")
if not isinstance(addr, multiaddr.Multiaddr):
raise InvalidAddrError(f"addr={addr} should be in type Multiaddr")
parts = addr.split()
if not parts:
raise InvalidAddrError()
p2p_part = parts[-1]
last_protocol_code = p2p_part.protocols()[0].code
if last_protocol_code != multiaddr.protocols.P_P2P:
raise InvalidAddrError(
f"the last protocol should be P_P2P instead of {last_protocol_code}"
)
# make sure the /p2p value parses as a peer.ID
peer_id_str = p2p_part.value_for_protocol(multiaddr.protocols.P_P2P)
peer_id = id_b58_decode(peer_id_str)
# we might have received just an / p2p part, which means there's no addr.
if len(parts) > 1:
addr = multiaddr.Multiaddr.join(*parts[:-1])
peer_data = PeerData()
peer_data.addrs = [addr]
peer_data.protocols = [p.code for p in addr.protocols()]
return PeerInfo(peer_id, peer_data)
|
def info_from_p2p_addr(addr: multiaddr.Multiaddr) -> PeerInfo:
if not addr:
raise InvalidAddrError("`addr` should not be `None`")
if not isinstance(addr, multiaddr.Multiaddr):
raise InvalidAddrError(f"addr={addr} should be in type Multiaddr")
parts = addr.split()
if not parts:
raise InvalidAddrError()
p2p_part = parts[-1]
last_protocol_code = p2p_part.protocols()[0].code
if last_protocol_code != multiaddr.protocols.P_P2P:
raise InvalidAddrError(
f"the last protocol should be P_P2P instead of {last_protocol_code}"
)
# make sure the /p2p value parses as a peer.ID
peer_id_str = p2p_part.value_for_protocol(multiaddr.protocols.P_P2P)
peer_id = id_b58_decode(peer_id_str)
# we might have received just an / p2p part, which means there's no addr.
if len(parts) > 1:
addr = multiaddr.Multiaddr.join(*parts[:-1])
peer_data = PeerData()
peer_data.addrs = [addr]
peer_data.protocols = [p.code for p in addr.protocols()]
return PeerInfo(peer_id, peer_data)
|
41,952 |
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
# TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
# examples and tutorials.
"checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
"sphinx",
"sphinx_rtd_theme",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
"plotly>=4.0.0", # optuna/visualization.
"pandas",
"lightgbm",
"torch==1.7.1",
"torchvision==0.8.2",
"torchaudio==0.7.2",
"thop",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
"dask[dataframe]",
"dask-ml",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
"optax",
"dm-haiku",
"hydra-optuna-sweeper",
],
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
}
return requirements
|
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
# TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
# examples and tutorials.
"checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"mlflow",
],
"document": [
"sphinx",
"sphinx_rtd_theme",
"sphinx-copybutton",
"sphinx-gallery",
"sphinx-plotly-directive",
"pillow",
"matplotlib",
"scikit-learn",
"plotly>=4.0.0", # optuna/visualization.
"pandas",
"lightgbm",
"torch==1.7.1",
"torchvision==0.8.2",
"torchaudio==0.7.2",
"thop",
],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"scikit-image",
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
"xgboost",
"keras",
"tensorflow>=2.0.0",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"thop",
"skorch",
"stable-baselines3>=0.7.0",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
"dask[dataframe]",
"dask-ml",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
"optax",
"dm-haiku",
"hydra-optuna-sweeper",
],
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"lightgbm",
"matplotlib>=3.0.0",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
"tests": ["fakeredis", "pytest"],
"optional": [
"bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
"matplotlib>=3.0.0", # optuna/visualization/matplotlib
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
"scikit-learn>=0.19.0,<0.23.0", # optuna/visualization/param_importances.py.
],
"integration": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"chainer>=5.0.0",
"cma",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"scikit-learn>=0.19.0,<0.23.0",
"scikit-optimize",
"xgboost",
"keras",
"tensorflow",
"tensorflow-datasets",
"pytorch-ignite",
"pytorch-lightning>=1.0.2",
"skorch",
"catalyst",
"torch==1.7.1 ; sys_platform=='darwin'",
"torch==1.7.1+cpu ; sys_platform!='darwin'",
"torchvision==0.8.2 ; sys_platform=='darwin'",
"torchvision==0.8.2+cpu ; sys_platform!='darwin'",
"torchaudio==0.7.2",
"allennlp>=1.0.0",
# TODO(crcrpar): Support botorch v0.4.0.
# See: https://github.com/optuna/optuna/issues/2381
"botorch<0.4.0 ; python_version>'3.6'",
"fastai",
],
}
return requirements
|
32,427 |
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults:
'''Command handler for acquire command'''
endpoint = args.get('endpoint', None)
profile = args.get('profile', None)
caseid = args.get('caseid', None)
organization_id = args.get('organization_id', None)
result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id)
return CommandResults(
outputs_prefix='Binalyze.Air.Acquisition',
outputs_key_field='endpoint',
outputs=result,
)
|
def air_acquire_command(client: Client, args: Dict[str, Any]) -> CommandResults:
'''Command handler for acquire command'''
endpoint = args.get('endpoint', None)
profile = args.get('profile', None)
case_id = args.get('case_id', None)
organization_id = args.get('organization_id', None)
result: Dict[str, Any] = client.air_acquire(endpoint, profile, caseid, organization_id)
return CommandResults(
outputs_prefix='Binalyze.Air.Acquisition',
outputs_key_field='endpoint',
outputs=result,
)
|
27,658 |
def get_issue_link(pr_url):
response = requests.get(pr_url).json()
body = response.get("body")
if body:
match = re.search(r'(?:(?:Issue Number)|(?:fix)|(?:bug)).*?(https?://(?:www\.)?github\.com/.*?/issues/(\d+))', body)
if match:
issue_url = match.group(1)
issue_num = match.group(2)
return issue_url, issue_num
return None, None
|
def get_issue_link(pr_url):
response = requests.get(pr_url).json()
body = response.get("body")
if body:
match = re.search(r'(?:(?:Issue Number)|(?:fix)|(?:bug)|(?:cc)|(?:ref)).*?(https?://(?:www\.)?github\.com/.*?/issues/(\d+))', body)
if match:
issue_url = match.group(1)
issue_num = match.group(2)
return issue_url, issue_num
return None, None
|