id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars)
---|---|---|
45,714 | def forecast(
R,
metadata,
V,
timesteps,
n_ens_members=24,
n_cascade_levels=6,
win_size=256,
overlap=0.1,
war_thr=0.1,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="ssft",
ar_order=2,
vel_pert_method=None,
probmatching_method="cdf",
mask_method="incremental",
callback=None,
fft_method="numpy",
return_output=True,
seed=None,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
noise_kwargs=None,
vel_pert_kwargs=None,
mask_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast ensemble by using the Short-space ensemble prediction
system (SSEPS) method.
This is an experimental version of STEPS which allows for localization
by means of a window function.
Parameters
----------
R: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between the inputs
are assumed to be regular, and the inputs are required to have finite values.
metadata: dict
Metadata dictionary containing the accutime, xpixelsize, threshold and
zerovalue attributes as described in the documentation of
:py:mod:`pysteps.io.importers`. xpixelsize is assumed to be in meters.
V: array-like
Array of shape (2,m,n) containing the x- and y-components of the advection
field. The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
win_size: int or two-element sequence of ints
Side length of the localization window (in pixels).
overlap: float [0,1[
A float between 0 and 1 prescribing the level of overlap between
successive windows. If set to 0, no overlap is used.
war_thr: float
Threshold for the minimum fraction of rain in a given window.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
n_ens_members: int
The number of ensemble members to generate.
n_cascade_levels: int
The number of cascade levels to use.
extrap_method: {'semilagrangian'}
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}
Name of the bandpass filter method to use with the cascade
decomposition.
noise_method: {'parametric','nonparametric','ssft','nested',None}
Name of the noise generator to use for perturbing the precipitation
field. See the documentation of pysteps.noise.interface. If set to None,
no noise is generated.
ar_order: int
The order of the autoregressive model to use. Must be >= 1.
vel_pert_method: {'bps',None}
Name of the noise generator to use for perturbing the advection field.
See the documentation of pysteps.noise.interface. If set to None,
the advection field is not perturbed.
mask_method: {'incremental', None}
The method to use for masking no precipitation areas in the forecast
field. The masked pixels are set to the minimum value of the
observations. 'incremental' = iteratively buffer the mask with a
certain rate (currently it is 1 km/min), None=no masking.
probmatching_method: {'cdf', None}
Method for matching the statistics of the forecast field with those of
the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, None=no matching applied. Using 'mean' requires
that mask_method is not None.
callback: function
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input field R, respectively. This can be used, for instance, for
writing the outputs into files.
return_output: bool
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function.
seed: int
Optional seed number for the random generators.
num_workers: int
The number of workers to use for parallel computation. Applicable if
dask is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting the
environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
extrap_kwargs: dict
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
noise_kwargs: dict
Optional dictionary containing keyword arguments for the initializer of
the noise generator. See the documentation of
pysteps.noise.fftgenerators.
vel_pert_kwargs: dict
Optional dictionary containing keyword arguments "p_pert_par" and
"p_pert_perp" for the initializer of the velocity perturbator.
See the documentation of pysteps.noise.motion.
mask_kwargs: dict
Optional dictionary containing mask keyword arguments 'mask_f' and
'mask_rim', the factor defining the mask increment and the rim size,
respectively.
The mask increment is defined as mask_f*timestep/kmperpixel.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
If return_output is True, a four-dimensional array of shape
(n_ens_members,num_timesteps,m,n) containing a time series of forecast
precipitation fields for each ensemble member. Otherwise, a None value
is returned. The time series starts from t0+timestep, where timestep is
taken from the input precipitation fields R.
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface,
pysteps.noise.interface, pysteps.noise.utils.compute_noise_stddev_adjs
Notes
-----
Please be aware that this represents a (very) experimental implementation.
References
----------
:cite:`Seed2003`, :cite:`BPS2006`, :cite:`SPN2013`, :cite:`NBSG2017`
"""
_check_inputs(R, V, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
if noise_kwargs is None:
noise_kwargs = dict()
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
if mask_kwargs is None:
mask_kwargs = dict()
if np.any(~np.isfinite(R)):
raise ValueError("R contains non-finite values")
if np.any(~np.isfinite(V)):
raise ValueError("V contains non-finite values")
if mask_method not in ["incremental", None]:
raise ValueError(
"unknown mask method %s: must be 'incremental' or None" % mask_method
)
if np.isscalar(win_size):
win_size = (int(win_size), int(win_size))
else:
win_size = tuple([int(win_size[i]) for i in range(2)])
timestep = metadata["accutime"]
kmperpixel = metadata["xpixelsize"] / 1000
print("Computing SSEPS nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (R.shape[1], R.shape[2]))
print("km/pixel: %g" % kmperpixel)
print("time step: %d minutes" % timestep)
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("bandpass filter: %s" % bandpass_filter_method)
print("decomposition: %s" % decomp_method)
print("noise generator: %s" % noise_method)
print("velocity perturbator: %s" % vel_pert_method)
print("precip. mask method: %s" % mask_method)
print("probability matching: %s" % probmatching_method)
print("FFT method: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
print("localization window: %dx%d" % (win_size[0], win_size[1]))
print("overlap: %.1f" % overlap)
print("war thr: %.2f" % war_thr)
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("ensemble size: %d" % n_ens_members)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the AR(p) model: %d" % ar_order)
print("dask imported: %s" % ("yes" if dask_imported else "no"))
print("num workers: %d" % num_workers)
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_pert_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_pert_perp", noise.motion.get_default_params_bps_perp()
)
print(
"velocity perturbations, parallel: %g,%g,%g"
% (vp_par[0], vp_par[1], vp_par[2])
)
print(
"velocity perturbations, perpendicular: %g,%g,%g"
% (vp_perp[0], vp_perp[1], vp_perp[2])
)
R_thr = metadata["threshold"]
R_min = metadata["zerovalue"]
num_ensemble_workers = n_ens_members if num_workers > n_ens_members else num_workers
if measure_time:
starttime_init = time.time()
# get methods
extrapolator_method = extrapolation.get_method(extrap_method)
x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))
xy_coords = np.stack([x_values, y_values])
decomp_method, __ = cascade.get_method(decomp_method)
filter_method = cascade.get_method(bandpass_filter_method)
if noise_method is not None:
init_noise, generate_noise = noise.get_method(noise_method)
# advect the previous precipitation fields to the same position as the
# most recent one (i.e. transform them into the Lagrangian coordinates)
R = R[-(ar_order + 1) :, :, :].copy()
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
res = []
f = lambda R, i: extrapolator_method(
R[i, :, :], V, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not dask_imported:
R[i, :, :] = f(R, i)
else:
res.append(dask.delayed(f)(R, i))
if dask_imported:
num_workers_ = len(res) if num_workers > len(res) else num_workers
R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])
if mask_method == "incremental":
# get mask parameters
mask_rim = mask_kwargs.get("mask_rim", 10)
mask_f = mask_kwargs.get("mask_f", 1.0)
# initialize the structuring element
struct = scipy.ndimage.generate_binary_structure(2, 1)
# iterate it to expand it nxn
n = mask_f * timestep / kmperpixel
struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
noise_kwargs.update(
{
"win_size": win_size,
"overlap": overlap,
"war_thr": war_thr,
"rm_rdisc": True,
"donorm": True,
}
)
print("Estimating nowcast parameters...", end="")
def estimator(R, parsglob=None, idxm=None, idxn=None):
pars = {}
# initialize the perturbation generator for the precipitation field
if noise_method is not None and parsglob is None:
P = init_noise(R, fft_method=fft_method, **noise_kwargs)
else:
P = None
pars["P"] = P
# initialize the band-pass filter
if parsglob is None:
filter = filter_method(R.shape[1:], n_cascade_levels, **filter_kwargs)
pars["filter"] = filter
else:
pars["filter"] = None
# compute the cascade decompositions of the input precipitation fields
if parsglob is None:
R_d = []
for i in range(ar_order + 1):
R_d_ = decomp_method(
R[i, :, :],
filter,
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
R_d.append(R_d_)
R_d_ = None
# normalize the cascades and rearrange them into a four-dimensional array
# of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
if parsglob is None:
R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
mu = R_d[-1]["means"]
sigma = R_d[-1]["stds"]
R_d = None
else:
R_c = parsglob["R_c"][0][
:, :, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
mu = np.mean(R_c, axis=(2, 3))
sigma = np.std(R_c, axis=(2, 3))
R_c = (R_c - mu[:, :, None, None]) / sigma[:, :, None, None]
mu = mu[:, -1]
sigma = sigma[:, -1]
pars["mu"] = mu
pars["sigma"] = sigma
# compute lag-l temporal autocorrelation coefficients for each cascade level
GAMMA = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
R_c_ = np.stack([R_c[i, j, :, :] for j in range(ar_order + 1)])
GAMMA[i, :] = correlation.temporal_autocorrelation(R_c_)
R_c_ = None
if ar_order == 2:
# adjust the local lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
GAMMA[i, 0], GAMMA[i, 1]
)
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
PHI = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
pars["PHI"] = PHI
# stack the cascades into a five-dimensional array containing all ensemble
# members
R_c = [R_c.copy() for i in range(n_ens_members)]
pars["R_c"] = R_c
if mask_method is not None and parsglob is None:
MASK_prec = R[-1, :, :] >= R_thr
if mask_method == "incremental":
# initialize precip mask for each member
MASK_prec = _compute_incremental_mask(MASK_prec, struct, mask_rim)
MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
else:
MASK_prec = None
pars["MASK_prec"] = MASK_prec
return pars
# prepare windows
M, N = R.shape[1:]
n_windows_M = np.ceil(1.0 * M / win_size[0]).astype(int)
n_windows_N = np.ceil(1.0 * N / win_size[1]).astype(int)
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
if measure_time:
starttime = time.time()
# compute global parameters to be used as defaults
parsglob = estimator(R)
# loop windows
if n_windows_M > 1 or n_windows_N > 1:
war = np.empty((n_windows_M, n_windows_N))
PHI = np.empty((n_windows_M, n_windows_N, n_cascade_levels, ar_order + 1))
mu = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
sigma = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
ff = []
rc = []
pp = []
mm = []
for m in range(n_windows_M):
ff_ = []
pp_ = []
rc_ = []
mm_ = []
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(np.max((m * win_size[0] - overlap * win_size[0], 0)))
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(np.max((n * win_size[1] - overlap * win_size[1], 0)))
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
mask = np.zeros((M, N), dtype=bool)
mask[idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)] = True
R_ = R[:, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)]
war[m, n] = np.sum(R_[-1, :, :] >= R_thr) / R_[-1, :, :].size
if war[m, n] > war_thr:
# estimate local parameters
pars = estimator(R, parsglob, idxm, idxn)
ff_.append(pars["filter"])
pp_.append(pars["P"])
rc_.append(pars["R_c"])
mm_.append(pars["MASK_prec"])
mu[m, n, :] = pars["mu"]
sigma[m, n, :] = pars["sigma"]
PHI[m, n, :, :] = pars["PHI"]
else:
# dry window
ff_.append(None)
pp_.append(None)
rc_.append(None)
mm_.append(None)
ff.append(ff_)
pp.append(pp_)
rc.append(rc_)
mm.append(mm_)
# remove unnecessary variables
ff_ = None
pp_ = None
rc_ = None
mm_ = None
pars = None
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print(" done.")
# initialize the random generators
if noise_method is not None:
randgen_prec = []
randgen_motion = []
np.random.seed(seed)
for j in range(n_ens_members):
rs = np.random.RandomState(seed)
randgen_prec.append(rs)
seed = rs.randint(0, high=1e9)
rs = np.random.RandomState(seed)
randgen_motion.append(rs)
seed = rs.randint(0, high=1e9)
if vel_pert_method is not None:
init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
# initialize the perturbation generators for the motion field
vps = []
for j in range(n_ens_members):
kwargs = {
"randstate": randgen_motion[j],
"p_par": vp_par,
"p_perp": vp_perp,
}
vp_ = init_vel_noise(V, 1.0 / kmperpixel, timestep, **kwargs)
vps.append(vp_)
D = [None for j in range(n_ens_members)]
R_f = [[] for j in range(n_ens_members)]
t_nowcast = 0
if measure_time:
init_time = time.time() - starttime_init
R = R[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
extrap_kwargs["return_displacement"] = True
R_f_prev = [R for i in range(n_ens_members)]
t_prev = [0.0 for j in range(n_ens_members)]
t_total = [0.0 for j in range(n_ens_members)]
# iterate each time step
for t in range(len(timesteps)):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
else:
subtimesteps = [t]
if len(subtimesteps) > 1 or t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate each ensemble member
def worker(j):
# first the global step
if noise_method is not None:
# generate noise field
EPS = generate_noise(
parsglob["P"], randstate=randgen_prec[j], fft_method=fft_method
)
# decompose the noise field into a cascade
EPS_d = decomp_method(
EPS,
parsglob["filter"],
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
else:
EPS_d = None
# iterate the AR(p) model for each cascade level
R_c = parsglob["R_c"][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :].copy()
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d is not None:
EPS_ = (
EPS_d["cascade_levels"][i, :, :] - EPS_d["means"][i]
) / EPS_d["stds"][i]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], parsglob["PHI"][i, :], eps=EPS_
)
EPS_ = None
parsglob["R_c"][j] = R_c.copy()
EPS = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
R_f_new = _recompose_cascade(R_c, parsglob["mu"], parsglob["sigma"])
R_c = None
# then the local steps
if n_windows_M > 1 or n_windows_N > 1:
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
R_l = np.zeros((M, N), dtype=float)
M_s = np.zeros((M, N), dtype=float)
for m in range(n_windows_M):
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(
np.max((m * win_size[0] - overlap * win_size[0], 0))
)
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(
np.max((n * win_size[1] - overlap * win_size[1], 0))
)
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
# build localization mask
mask = _get_mask((M, N), idxm, idxn)
mask_l = mask[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
M_s += mask
# skip if dry
if war[m, n] > war_thr:
R_c = rc[m][n][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :]
if noise_method is not None:
# extract noise field
EPS_d_l = EPS_d["cascade_levels"][
:,
idxm.item(0) : idxm.item(1),
idxn.item(0) : idxn.item(1),
].copy()
mu_ = np.mean(EPS_d_l, axis=(1, 2))
sigma_ = np.std(EPS_d_l, axis=(1, 2))
else:
EPS_d_l = None
# iterate the AR(p) model for each cascade level
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d_l is not None:
EPS_ = (
EPS_d_l[i, :, :] - mu_[i, None, None]
) / sigma_[i, None, None]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], PHI[m, n, i, :], eps=EPS_
)
EPS_ = None
rc[m][n][j] = R_c.copy()
EPS_d_l = mu_ = sigma_ = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
mu_ = mu[m, n, :]
sigma_ = sigma[m, n, :]
R_c = [
((R_c[i, -1, :, :] * sigma_[i]) + mu_[i])
* parsglob["sigma"][i]
+ parsglob["mu"][i]
for i in range(len(mu_))
]
R_l_ = np.sum(np.stack(R_c), axis=0)
R_c = mu_ = sigma_ = None
# R_l_ = _recompose_cascade(R_c[:, :, :], mu[m, n, :], sigma[m, n, :])
else:
R_l_ = R_f_new[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_ = R[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
R_l_ = probmatching.nonparam_match_empirical_cdf(R_l_, R_)
R_ = None
R_l[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
] += (R_l_ * mask_l)
R_l_ = None
ind = M_s > 0
R_l[ind] *= 1 / M_s[ind]
R_l[~ind] = R_min
R_f_new = R_l.copy()
R_l = None
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_f_new[R_f_new < R_thr] = R_min
R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
if mask_method is not None:
# apply the precipitation mask to prevent generation of new
# precipitation into areas where it was not originally
# observed
if mask_method == "incremental":
MASK_prec = parsglob["MASK_prec"][j].copy()
R_f_new = R_f_new.min() + (R_f_new - R_f_new.min()) * MASK_prec
MASK_prec = None
if mask_method == "incremental":
parsglob["MASK_prec"][j] = _compute_incremental_mask(
R_f_new >= R_thr, struct, mask_rim
)
R_f_out = []
extrap_kwargs_ = extrap_kwargs.copy()
extrap_kwargs_["xy_coords"] = xy_coords
extrap_kwargs_["return_displacement"] = True
V_pert = V
# advect the recomposed precipitation field to obtain the forecast for
# the current time step (or subtimesteps if non-integer time steps are
# given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
j
] + t_diff_prev_int * R_f_new
else:
R_f_ip = R_f_prev[j]
t_diff_prev = t_sub - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
extrap_kwargs_["displacement_prev"] = D[j]
R_f_ep, D[j] = extrapolator_method(
R_f_ip,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
R_f_ep[0][R_f_ep[0] < R_thr] = R_min
R_f_out.append(R_f_ep[0])
t_prev[j] = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
extrap_kwargs_["displacement_prev"] = D[j]
_, D[j] = extrapolator_method(
None,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
t_prev[j] = t + 1
R_f_prev[j] = R_f_new
return R_f_out
res = []
for j in range(n_ens_members):
if not dask_imported or n_ens_members == 1:
res.append(worker(j))
else:
res.append(dask.delayed(worker)(j))
R_f_ = (
dask.compute(*res, num_workers=num_ensemble_workers)
if dask_imported and n_ens_members > 1
else res
)
res = None
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if callback is not None:
callback(np.stack(R_f_))
R_f_ = None
if return_output:
for j in range(n_ens_members):
R_f[j].extend(R_f_[j])
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if return_output:
outarr = np.stack([np.stack(R_f[j]) for j in range(n_ens_members)])
if measure_time:
return outarr, init_time, mainloop_time
else:
return outarr
else:
return None
| def forecast(
R,
metadata,
V,
timesteps,
n_ens_members=24,
n_cascade_levels=6,
win_size=256,
overlap=0.1,
war_thr=0.1,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="ssft",
ar_order=2,
vel_pert_method=None,
probmatching_method="cdf",
mask_method="incremental",
callback=None,
fft_method="numpy",
return_output=True,
seed=None,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
noise_kwargs=None,
vel_pert_kwargs=None,
mask_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast ensemble by using the Short-space ensemble prediction
system (SSEPS) method.
This is an experimental version of STEPS which allows for localization
by means of a window function.
Parameters
----------
R: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between the inputs
are assumed to be regular, and the inputs are required to have finite values.
metadata: dict
Metadata dictionary containing the accutime, xpixelsize, threshold and
zerovalue attributes as described in the documentation of
:py:mod:`pysteps.io.importers`. xpixelsize is assumed to be in meters.
V: array-like
Array of shape (2,m,n) containing the x- and y-components of the advection
field. The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
win_size: int or two-element sequence of ints
Side length of the localization window (in pixels).
overlap: float [0,1[
A float between 0 and 1 prescribing the level of overlap between
successive windows. If set to 0, no overlap is used.
war_thr: float
Threshold for the minimum fraction of rain in a given window.
timesteps: int or list
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
n_ens_members: int
The number of ensemble members to generate.
n_cascade_levels: int
The number of cascade levels to use.
extrap_method: {'semilagrangian'}
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}
Name of the bandpass filter method to use with the cascade
decomposition.
noise_method: {'parametric','nonparametric','ssft','nested',None}
Name of the noise generator to use for perturbing the precipitation
field. See the documentation of pysteps.noise.interface. If set to None,
no noise is generated.
ar_order: int
The order of the autoregressive model to use. Must be >= 1.
vel_pert_method: {'bps',None}
Name of the noise generator to use for perturbing the advection field.
See the documentation of pysteps.noise.interface. If set to None,
the advection field is not perturbed.
mask_method: {'incremental', None}
The method to use for masking no precipitation areas in the forecast
field. The masked pixels are set to the minimum value of the
observations. 'incremental' = iteratively buffer the mask with a
certain rate (currently it is 1 km/min), None=no masking.
probmatching_method: {'cdf', None}
Method for matching the statistics of the forecast field with those of
the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, None=no matching applied. Using 'mean' requires
that mask_method is not None.
callback: function
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input field R, respectively. This can be used, for instance, for
writing the outputs into files.
return_output: bool
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function.
seed: int
Optional seed number for the random generators.
num_workers: int
The number of workers to use for parallel computation. Applicable if
dask is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting the
environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
extrap_kwargs: dict
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
noise_kwargs: dict
Optional dictionary containing keyword arguments for the initializer of
the noise generator. See the documentation of
pysteps.noise.fftgenerators.
vel_pert_kwargs: dict
Optional dictionary containing keyword arguments "p_pert_par" and
"p_pert_perp" for the initializer of the velocity perturbator.
See the documentation of pysteps.noise.motion.
mask_kwargs: dict
Optional dictionary containing mask keyword arguments 'mask_f' and
'mask_rim', the factor defining the mask increment and the rim size,
respectively.
The mask increment is defined as mask_f*timestep/kmperpixel.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
If return_output is True, a four-dimensional array of shape
(n_ens_members,num_timesteps,m,n) containing a time series of forecast
precipitation fields for each ensemble member. Otherwise, a None value
is returned. The time series starts from t0+timestep, where timestep is
taken from the input precipitation fields R.
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface,
pysteps.noise.interface, pysteps.noise.utils.compute_noise_stddev_adjs
Notes
-----
Please be aware that this represents a (very) experimental implementation.
References
----------
:cite:`Seed2003`, :cite:`BPS2006`, :cite:`SPN2013`, :cite:`NBSG2017`
"""
_check_inputs(R, V, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
if noise_kwargs is None:
noise_kwargs = dict()
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
if mask_kwargs is None:
mask_kwargs = dict()
if np.any(~np.isfinite(R)):
raise ValueError("R contains non-finite values")
if np.any(~np.isfinite(V)):
raise ValueError("V contains non-finite values")
if mask_method not in ["incremental", None]:
raise ValueError(
"unknown mask method %s: must be 'incremental' or None" % mask_method
)
if np.isscalar(win_size):
win_size = (int(win_size), int(win_size))
else:
win_size = tuple([int(win_size[i]) for i in range(2)])
timestep = metadata["accutime"]
kmperpixel = metadata["xpixelsize"] / 1000
print("Computing SSEPS nowcast:")
print("------------------------")
print("")
print("Inputs:")
print("-------")
print("input dimensions: %dx%d" % (R.shape[1], R.shape[2]))
print("km/pixel: %g" % kmperpixel)
print("time step: %d minutes" % timestep)
print("")
print("Methods:")
print("--------")
print("extrapolation: %s" % extrap_method)
print("bandpass filter: %s" % bandpass_filter_method)
print("decomposition: %s" % decomp_method)
print("noise generator: %s" % noise_method)
print("velocity perturbator: %s" % vel_pert_method)
print("precip. mask method: %s" % mask_method)
print("probability matching: %s" % probmatching_method)
print("FFT method: %s" % fft_method)
print("")
print("Parameters:")
print("-----------")
print("localization window: %dx%d" % (win_size[0], win_size[1]))
print("overlap: %.1f" % overlap)
print("war thr: %.2f" % war_thr)
if isinstance(timesteps, int):
print("number of time steps: %d" % timesteps)
else:
print("time steps: %s" % timesteps)
print("ensemble size: %d" % n_ens_members)
print("number of cascade levels: %d" % n_cascade_levels)
print("order of the AR(p) model: %d" % ar_order)
print("dask imported: %s" % ("yes" if dask_imported else "no"))
print("num workers: %d" % num_workers)
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_pert_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_pert_perp", noise.motion.get_default_params_bps_perp()
)
print(
"velocity perturbations, parallel: %g,%g,%g"
% (vp_par[0], vp_par[1], vp_par[2])
)
print(
"velocity perturbations, perpendicular: %g,%g,%g"
% (vp_perp[0], vp_perp[1], vp_perp[2])
)
R_thr = metadata["threshold"]
R_min = metadata["zerovalue"]
num_ensemble_workers = n_ens_members if num_workers > n_ens_members else num_workers
if measure_time:
starttime_init = time.time()
# get methods
extrapolator_method = extrapolation.get_method(extrap_method)
x_values, y_values = np.meshgrid(np.arange(R.shape[2]), np.arange(R.shape[1]))
xy_coords = np.stack([x_values, y_values])
decomp_method, __ = cascade.get_method(decomp_method)
filter_method = cascade.get_method(bandpass_filter_method)
if noise_method is not None:
init_noise, generate_noise = noise.get_method(noise_method)
# advect the previous precipitation fields to the same position as the
# most recent one (i.e. transform them into the Lagrangian coordinates)
R = R[-(ar_order + 1) :, :, :].copy()
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
res = []
f = lambda R, i: extrapolator_method(
R[i, :, :], V, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not dask_imported:
R[i, :, :] = f(R, i)
else:
res.append(dask.delayed(f)(R, i))
if dask_imported:
num_workers_ = len(res) if num_workers > len(res) else num_workers
R = np.stack(list(dask.compute(*res, num_workers=num_workers_)) + [R[-1, :, :]])
if mask_method == "incremental":
# get mask parameters
mask_rim = mask_kwargs.get("mask_rim", 10)
mask_f = mask_kwargs.get("mask_f", 1.0)
# initialize the structuring element
struct = scipy.ndimage.generate_binary_structure(2, 1)
# iterate it to expand it nxn
n = mask_f * timestep / kmperpixel
struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
noise_kwargs.update(
{
"win_size": win_size,
"overlap": overlap,
"war_thr": war_thr,
"rm_rdisc": True,
"donorm": True,
}
)
print("Estimating nowcast parameters...", end="")
def estimator(R, parsglob=None, idxm=None, idxn=None):
pars = {}
# initialize the perturbation generator for the precipitation field
if noise_method is not None and parsglob is None:
P = init_noise(R, fft_method=fft_method, **noise_kwargs)
else:
P = None
pars["P"] = P
# initialize the band-pass filter
if parsglob is None:
filter = filter_method(R.shape[1:], n_cascade_levels, **filter_kwargs)
pars["filter"] = filter
else:
pars["filter"] = None
# compute the cascade decompositions of the input precipitation fields
if parsglob is None:
R_d = []
for i in range(ar_order + 1):
R_d_ = decomp_method(
R[i, :, :],
filter,
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
R_d.append(R_d_)
R_d_ = None
# normalize the cascades and rearrange them into a four-dimensional array
# of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
if parsglob is None:
R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
mu = R_d[-1]["means"]
sigma = R_d[-1]["stds"]
R_d = None
else:
R_c = parsglob["R_c"][0][
:, :, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
mu = np.mean(R_c, axis=(2, 3))
sigma = np.std(R_c, axis=(2, 3))
R_c = (R_c - mu[:, :, None, None]) / sigma[:, :, None, None]
mu = mu[:, -1]
sigma = sigma[:, -1]
pars["mu"] = mu
pars["sigma"] = sigma
# compute lag-l temporal autocorrelation coefficients for each cascade level
GAMMA = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
R_c_ = np.stack([R_c[i, j, :, :] for j in range(ar_order + 1)])
GAMMA[i, :] = correlation.temporal_autocorrelation(R_c_)
R_c_ = None
if ar_order == 2:
# adjust the local lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
GAMMA[i, 0], GAMMA[i, 1]
)
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
PHI = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
pars["PHI"] = PHI
# stack the cascades into a five-dimensional array containing all ensemble
# members
R_c = [R_c.copy() for i in range(n_ens_members)]
pars["R_c"] = R_c
if mask_method is not None and parsglob is None:
MASK_prec = R[-1, :, :] >= R_thr
if mask_method == "incremental":
# initialize precip mask for each member
MASK_prec = _compute_incremental_mask(MASK_prec, struct, mask_rim)
MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
else:
MASK_prec = None
pars["MASK_prec"] = MASK_prec
return pars
# prepare windows
M, N = R.shape[1:]
n_windows_M = np.ceil(1.0 * M / win_size[0]).astype(int)
n_windows_N = np.ceil(1.0 * N / win_size[1]).astype(int)
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
if measure_time:
starttime = time.time()
# compute global parameters to be used as defaults
parsglob = estimator(R)
# loop windows
if n_windows_M > 1 or n_windows_N > 1:
war = np.empty((n_windows_M, n_windows_N))
PHI = np.empty((n_windows_M, n_windows_N, n_cascade_levels, ar_order + 1))
mu = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
sigma = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
ff = []
rc = []
pp = []
mm = []
for m in range(n_windows_M):
ff_ = []
pp_ = []
rc_ = []
mm_ = []
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(np.max((m * win_size[0] - overlap * win_size[0], 0)))
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(np.max((n * win_size[1] - overlap * win_size[1], 0)))
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
mask = np.zeros((M, N), dtype=bool)
mask[idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)] = True
R_ = R[:, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)]
war[m, n] = np.sum(R_[-1, :, :] >= R_thr) / R_[-1, :, :].size
if war[m, n] > war_thr:
# estimate local parameters
pars = estimator(R, parsglob, idxm, idxn)
ff_.append(pars["filter"])
pp_.append(pars["P"])
rc_.append(pars["R_c"])
mm_.append(pars["MASK_prec"])
mu[m, n, :] = pars["mu"]
sigma[m, n, :] = pars["sigma"]
PHI[m, n, :, :] = pars["PHI"]
else:
# dry window
ff_.append(None)
pp_.append(None)
rc_.append(None)
mm_.append(None)
ff.append(ff_)
pp.append(pp_)
rc.append(rc_)
mm.append(mm_)
# remove unnecessary variables
ff_ = None
pp_ = None
rc_ = None
mm_ = None
pars = None
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print(" done.")
# initialize the random generators
if noise_method is not None:
randgen_prec = []
randgen_motion = []
np.random.seed(seed)
for j in range(n_ens_members):
rs = np.random.RandomState(seed)
randgen_prec.append(rs)
seed = rs.randint(0, high=1e9)
rs = np.random.RandomState(seed)
randgen_motion.append(rs)
seed = rs.randint(0, high=1e9)
if vel_pert_method is not None:
init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
# initialize the perturbation generators for the motion field
vps = []
for j in range(n_ens_members):
kwargs = {
"randstate": randgen_motion[j],
"p_par": vp_par,
"p_perp": vp_perp,
}
vp_ = init_vel_noise(V, 1.0 / kmperpixel, timestep, **kwargs)
vps.append(vp_)
D = [None for j in range(n_ens_members)]
R_f = [[] for j in range(n_ens_members)]
t_nowcast = 0
if measure_time:
init_time = time.time() - starttime_init
R = R[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
extrap_kwargs["return_displacement"] = True
R_f_prev = [R for i in range(n_ens_members)]
t_prev = [0.0 for j in range(n_ens_members)]
t_total = [0.0 for j in range(n_ens_members)]
# iterate each time step
for t, timestep in enumerate(timesteps):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in timesteps[t]]
else:
subtimesteps = [t]
if len(subtimesteps) > 1 or t > 0:
nowcast_time_step = True
else:
nowcast_time_step = False
if nowcast_time_step:
print(
"Computing nowcast for time step %d... " % (t_nowcast + 1),
end="",
flush=True,
)
t_nowcast += 1
if measure_time:
starttime = time.time()
# iterate each ensemble member
def worker(j):
# first the global step
if noise_method is not None:
# generate noise field
EPS = generate_noise(
parsglob["P"], randstate=randgen_prec[j], fft_method=fft_method
)
# decompose the noise field into a cascade
EPS_d = decomp_method(
EPS,
parsglob["filter"],
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
else:
EPS_d = None
# iterate the AR(p) model for each cascade level
R_c = parsglob["R_c"][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :].copy()
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d is not None:
EPS_ = (
EPS_d["cascade_levels"][i, :, :] - EPS_d["means"][i]
) / EPS_d["stds"][i]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], parsglob["PHI"][i, :], eps=EPS_
)
EPS_ = None
parsglob["R_c"][j] = R_c.copy()
EPS = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
R_f_new = _recompose_cascade(R_c, parsglob["mu"], parsglob["sigma"])
R_c = None
# then the local steps
if n_windows_M > 1 or n_windows_N > 1:
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
R_l = np.zeros((M, N), dtype=float)
M_s = np.zeros((M, N), dtype=float)
for m in range(n_windows_M):
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(
np.max((m * win_size[0] - overlap * win_size[0], 0))
)
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(
np.max((n * win_size[1] - overlap * win_size[1], 0))
)
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
# build localization mask
mask = _get_mask((M, N), idxm, idxn)
mask_l = mask[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
M_s += mask
# skip if dry
if war[m, n] > war_thr:
R_c = rc[m][n][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :]
if noise_method is not None:
# extract noise field
EPS_d_l = EPS_d["cascade_levels"][
:,
idxm.item(0) : idxm.item(1),
idxn.item(0) : idxn.item(1),
].copy()
mu_ = np.mean(EPS_d_l, axis=(1, 2))
sigma_ = np.std(EPS_d_l, axis=(1, 2))
else:
EPS_d_l = None
# iterate the AR(p) model for each cascade level
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d_l is not None:
EPS_ = (
EPS_d_l[i, :, :] - mu_[i, None, None]
) / sigma_[i, None, None]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], PHI[m, n, i, :], eps=EPS_
)
EPS_ = None
rc[m][n][j] = R_c.copy()
EPS_d_l = mu_ = sigma_ = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
mu_ = mu[m, n, :]
sigma_ = sigma[m, n, :]
R_c = [
((R_c[i, -1, :, :] * sigma_[i]) + mu_[i])
* parsglob["sigma"][i]
+ parsglob["mu"][i]
for i in range(len(mu_))
]
R_l_ = np.sum(np.stack(R_c), axis=0)
R_c = mu_ = sigma_ = None
# R_l_ = _recompose_cascade(R_c[:, :, :], mu[m, n, :], sigma[m, n, :])
else:
R_l_ = R_f_new[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_ = R[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
R_l_ = probmatching.nonparam_match_empirical_cdf(R_l_, R_)
R_ = None
R_l[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
] += (R_l_ * mask_l)
R_l_ = None
ind = M_s > 0
R_l[ind] *= 1 / M_s[ind]
R_l[~ind] = R_min
R_f_new = R_l.copy()
R_l = None
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_f_new[R_f_new < R_thr] = R_min
R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, R)
if mask_method is not None:
# apply the precipitation mask to prevent generation of new
# precipitation into areas where it was not originally
# observed
if mask_method == "incremental":
MASK_prec = parsglob["MASK_prec"][j].copy()
R_f_new = R_f_new.min() + (R_f_new - R_f_new.min()) * MASK_prec
MASK_prec = None
if mask_method == "incremental":
parsglob["MASK_prec"][j] = _compute_incremental_mask(
R_f_new >= R_thr, struct, mask_rim
)
R_f_out = []
extrap_kwargs_ = extrap_kwargs.copy()
extrap_kwargs_["xy_coords"] = xy_coords
extrap_kwargs_["return_displacement"] = True
V_pert = V
# advect the recomposed precipitation field to obtain the forecast for
# the current time step (or subtimesteps if non-integer time steps are
# given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
j
] + t_diff_prev_int * R_f_new
else:
R_f_ip = R_f_prev[j]
t_diff_prev = t_sub - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
extrap_kwargs_["displacement_prev"] = D[j]
R_f_ep, D[j] = extrapolator_method(
R_f_ip,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
R_f_ep[0][R_f_ep[0] < R_thr] = R_min
R_f_out.append(R_f_ep[0])
t_prev[j] = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if len(subtimesteps) == 0:
t_diff_prev = t + 1 - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = V + generate_vel_noise(vps[j], t_total[j] * timestep)
extrap_kwargs_["displacement_prev"] = D[j]
_, D[j] = extrapolator_method(
None,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
t_prev[j] = t + 1
R_f_prev[j] = R_f_new
return R_f_out
res = []
for j in range(n_ens_members):
if not dask_imported or n_ens_members == 1:
res.append(worker(j))
else:
res.append(dask.delayed(worker)(j))
R_f_ = (
dask.compute(*res, num_workers=num_ensemble_workers)
if dask_imported and n_ens_members > 1
else res
)
res = None
if measure_time:
print("%.2f seconds." % (time.time() - starttime))
else:
print("done.")
if callback is not None:
callback(np.stack(R_f_))
R_f_ = None
if return_output:
for j in range(n_ens_members):
R_f[j].extend(R_f_[j])
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if return_output:
outarr = np.stack([np.stack(R_f[j]) for j in range(n_ens_members)])
if measure_time:
return outarr, init_time, mainloop_time
else:
return outarr
else:
return None
|
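The localization in the forecast above hinges on one pattern: compute overlapping window bounds from `win_size` and `overlap`, accumulate each window's result through a mask (as `R_l` and `M_s` do), then normalize by the coverage count. The following standalone sketch reproduces just that bookkeeping with NumPy; the window size, overlap factor, and the identity per-window step are illustrative assumptions, not pysteps code.

```python
import numpy as np

def blend_windows(field, win_size=4, overlap=0.5):
    """Process a 2D field in overlapping windows and blend the results.

    Mirrors the idxm/idxn and M_s bookkeeping in the SSEPS forecast above:
    each window result is added through its coverage region and the sum is
    divided by the number of windows covering each pixel.
    """
    M, N = field.shape
    n_m = int(np.ceil(M / win_size))
    n_n = int(np.ceil(N / win_size))
    out = np.zeros((M, N), dtype=float)
    coverage = np.zeros((M, N), dtype=float)
    for m in range(n_m):
        for n in range(n_n):
            i0 = int(max(m * win_size - overlap * win_size, 0))
            i1 = int(min(i0 + win_size + overlap * win_size, M))
            j0 = int(max(n * win_size - overlap * win_size, 0))
            j1 = int(min(j0 + win_size + overlap * win_size, N))
            # "process" the local window; here it is just copied unchanged
            local = field[i0:i1, j0:j1]
            out[i0:i1, j0:j1] += local
            coverage[i0:i1, j0:j1] += 1.0
    valid = coverage > 0
    out[valid] /= coverage[valid]
    return out

if __name__ == "__main__":
    rng = np.random.default_rng(42)
    F = rng.random((10, 12))
    # with an identity per-window step, blending reproduces the input field
    assert np.allclose(blend_windows(F), F)
```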
31,588 | def get_dns_history_command(client, args):
hostname = args.get('hostname')
record_type = args.get('type')
page = int(args.get('page', 1))
res = client.get_dns_history(hostname=hostname, record_type=record_type, page=page)
res = {k: v for k, v in res.items() if k not in removed_keys}
records_list = list()
if record_type == "a":
pull_field = "ip"
elif record_type == "aaaa":
pull_field = "ipv6"
elif record_type == "mx":
pull_field = "host"
elif record_type == "ns":
pull_field = "nameserver"
elif record_type == "soa":
pull_field = "email"
elif record_type == "txt":
pull_field = "value"
records = res.get('records')
for record in records:
for value in record.get('values'):
if pull_field in value:
records_list.append(
{
"Record Type": record_type,
"Value(s)": value.get(pull_field)
}
)
readable_output = tableToMarkdown(f"DNS history for {hostname}:", records_list)
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.Domain",
outputs_key_field="name",
outputs={
"name": hostname,
f"{record_type}_history_records": res.get('records'),
f"{record_type}_history_record_pages": res.get('pages', 1)
},
readable_output=readable_output
)
return_results(command_results)
latest_record = res.get('records', [])[0]
values = latest_record.get('values', [])
values = [values] if isinstance(values, dict) else values
hosts = [x['host'] for x in values if "host" in x]
ipv4 = [x['ip'] for x in values if "ip" in x]
ipv6 = [x['ipv6'] for x in values if "ipv6" in x]
nameservers = [x['nameserver'] for x in values if "nameserver" in x]
domain_data = {
"Name": hostname
}
if nameservers:
domain_data['NameServers'] = ", ".join(nameservers)
create_standard_domain_context(domain_data=domain_data)
if ipv4:
[create_standard_ip_context({"Address": x}) for x in ipv4]
if ipv6:
[create_standard_ip_context({"Address": x}) for x in ipv6]
| def get_dns_history_command(client, args):
hostname = args.get('hostname')
record_type = args.get('type')
page = int(args.get('page', 1))
res = client.get_dns_history(hostname=hostname, record_type=record_type, page=page)
res = {k: v for k, v in res.items() if k not in removed_keys}
records_list = list()
if record_type == "a":
pull_field = "ip"
elif record_type == "aaaa":
pull_field = "ipv6"
elif record_type == "mx":
pull_field = "host"
elif record_type == "ns":
pull_field = "nameserver"
elif record_type == "soa":
pull_field = "email"
elif record_type == "txt":
pull_field = "value"
records = res.get('records')
for record in records:
for value in record.get('values'):
if pull_field in value:
records_list.append(
{
"Record Type": record_type,
"Value(s)": value.get(pull_field)
}
)
readable_output = tableToMarkdown(f"DNS history for {hostname}:", records_list)
command_results = CommandResults(
outputs_prefix="SecurityTrails.Domain",
outputs_key_field="name",
outputs={
"name": hostname,
f"{record_type}_history_records": res.get('records'),
f"{record_type}_history_record_pages": res.get('pages', 1)
},
readable_output=readable_output
)
return_results(command_results)
latest_record = res.get('records', [])[0]
values = latest_record.get('values', [])
values = [values] if isinstance(values, dict) else values
hosts = [x['host'] for x in values if "host" in x]
ipv4 = [x['ip'] for x in values if "ip" in x]
ipv6 = [x['ipv6'] for x in values if "ipv6" in x]
nameservers = [x['nameserver'] for x in values if "nameserver" in x]
domain_data = {
"Name": hostname
}
if nameservers:
domain_data['NameServers'] = ", ".join(nameservers)
create_standard_domain_context(domain_data=domain_data)
if ipv4:
[create_standard_ip_context({"Address": x}) for x in ipv4]
if ipv6:
[create_standard_ip_context({"Address": x}) for x in ipv6]
|
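The if/elif chain above selects which field to pull for each DNS record type. Below is a minimal, hypothetical refactoring sketch expressing that mapping as a dictionary lookup, which also avoids an unbound `pull_field` when an unexpected type is passed; the function and constant names are made up for illustration and are not part of the SecurityTrails integration.

```python
# Mapping of DNS record type to the field extracted from each value entry.
PULL_FIELDS = {
    "a": "ip",
    "aaaa": "ipv6",
    "mx": "host",
    "ns": "nameserver",
    "soa": "email",
    "txt": "value",
}

def extract_history_rows(records, record_type):
    """Flatten SecurityTrails-style history records into table rows."""
    pull_field = PULL_FIELDS.get(record_type)
    if pull_field is None:
        raise ValueError(f"unsupported record type: {record_type}")
    rows = []
    for record in records or []:
        for value in record.get("values", []):
            if pull_field in value:
                rows.append({"Record Type": record_type,
                             "Value(s)": value[pull_field]})
    return rows

# example
sample = [{"values": [{"ip": "203.0.113.10"}, {"ip": "203.0.113.11"}]}]
print(extract_history_rows(sample, "a"))
```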
25,614 | def _mix_latent_gp(W, g_mu, g_var, full_cov, full_output_cov):
r"""
Takes the mean and variance of an uncorrelated L-dimensional latent GP
and returns the mean and the variance of the mixed GP, `f = W \times g`,
where both f and g are GPs.
:param W: [P, L]
:param g_mu: [..., N, L]
:param g_var: [..., N, L] or [L, ..., N, N]
:return: f_mu and f_var, shape depends on `full_cov` and `full_output_cov`
"""
f_mu = tf.tensordot(g_mu, W, [[-1], [-1]]) # [..., N, P]
K = tf.rank(g_var)
leading_dims = (K - 3) if full_cov else (K - 2)
if full_cov and full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
g_var = _rollaxis(g_var, 1) # [..., N, N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, N, P, P]
perm = _get_perm_with_leading_dims(leading_dims, K-3, K-1, K-2, K)
f_var = tf.transpose(f_var, perm) # [..., N, P, N, P]
elif full_cov and not full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
f_var = tf.tensordot(g_var, W**2, [[0], [-1]]) # [..., N, N, P]
perm = _get_perm_with_leading_dims(leading_dims, K-1, K-3, K-2)
f_var = tf.transpose(f_var, perm) # [..., P, N, N]
elif not full_cov and full_output_cov: # g_var is [..., N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, P, P]
elif not full_cov and not full_output_cov: # g_var is [..., N, L]
W_squared = W**2 # [P, L]
f_var = tf.tensordot(g_var, W_squared, [[-1], [-1]]) # [..., N, P]
return f_mu, f_var
| def _mix_latent_gp(W, g_mu, g_var, full_cov, full_output_cov):
r"""
Takes the mean and variance of an uncorrelated L-dimensional latent GP
and returns the mean and the variance of the mixed GP, `f = W \times g`,
where both f and g are GPs.
:param W: [P, L]
:param g_mu: [..., N, L]
:param g_var: [..., N, L] (full_cov = False) or [L, ..., N, N] (full_cov = True)
:return: f_mu and f_var, shape depends on `full_cov` and `full_output_cov`
"""
f_mu = tf.tensordot(g_mu, W, [[-1], [-1]]) # [..., N, P]
K = tf.rank(g_var)
leading_dims = (K - 3) if full_cov else (K - 2)
if full_cov and full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
g_var = _rollaxis(g_var, 1) # [..., N, N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, N, P, P]
perm = _get_perm_with_leading_dims(leading_dims, K-3, K-1, K-2, K)
f_var = tf.transpose(f_var, perm) # [..., N, P, N, P]
elif full_cov and not full_output_cov: # g_var is [L, ..., N, N]
# this branch is practically never taken
f_var = tf.tensordot(g_var, W**2, [[0], [-1]]) # [..., N, N, P]
perm = _get_perm_with_leading_dims(leading_dims, K-1, K-3, K-2)
f_var = tf.transpose(f_var, perm) # [..., P, N, N]
elif not full_cov and full_output_cov: # g_var is [..., N, L]
g_var = tf.expand_dims(g_var, axis=-2) # [..., N, 1, L]
g_var_W = g_var * W # [..., N, P, L]
f_var = tf.tensordot(g_var_W, W, [[-1], [-1]]) # [..., N, P, P]
elif not full_cov and not full_output_cov: # g_var is [..., N, L]
W_squared = W**2 # [P, L]
f_var = tf.tensordot(g_var, W_squared, [[-1], [-1]]) # [..., N, P]
return f_mu, f_var
|
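For the most common branch above (`full_cov=False`, `full_output_cov=False`), the mixed-GP variance reduces to a matrix product with the squared mixing weights. The following NumPy-only check (TensorFlow is not required; shapes and seed are arbitrary) verifies that formula against explicit per-point covariance propagation W diag(g_var) W^T.

```python
import numpy as np

rng = np.random.default_rng(0)
P, L, N = 3, 2, 5
W = rng.normal(size=(P, L))
g_mu = rng.normal(size=(N, L))
g_var = rng.random(size=(N, L))  # diagonal (per-latent) variances

# mean and marginal variance of f = W @ g, matching the
# "not full_cov and not full_output_cov" branch above
f_mu = g_mu @ W.T            # [N, P]
f_var = g_var @ (W ** 2).T   # [N, P]

# cross-check against explicit covariance propagation for each data point
for n in range(N):
    cov_f = W @ np.diag(g_var[n]) @ W.T  # [P, P]
    assert np.allclose(f_var[n], np.diag(cov_f))
print("diagonal variance of W g matches diag(W diag(g_var) W^T)")
```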
28,068 | def build_stat_coll_cmd(action, config, source):
"""
Build the statistics collector analysis command.
"""
cmd = [config.analyzer_binary, '-c', '-x', action.lang, '--analyze',
# Do not warn about the unused gcc/g++ arguments.
'-Qunused-arguments',
'--analyzer-output', 'text']
for plugin in config.analyzer_plugins:
cmd.extend(["-Xclang", "-plugin",
"-Xclang", "checkercfg",
"-Xclang", "-load",
"-Xclang", plugin])
cmd.extend(['-Xclang',
'-analyzer-opt-analyze-headers'])
cmd.extend(config.analyzer_extra_arguments)
cmd.extend(action.analyzer_options)
# Enable the statistics collector checkers only.
collector_checkers = []
for checker_name, _ in config.checks().items():
if SpecialReturnValueCollector.checker_collect in checker_name:
collector_checkers.append(checker_name)
if ReturnValueCollector.checker_collect in checker_name:
collector_checkers.append(checker_name)
if not collector_checkers:
LOG.debug('No available statistics collector checkers were found')
return [], False
for coll_check in collector_checkers:
cmd.extend(['-Xclang', '-analyzer-checker=' + coll_check])
compile_lang = action.lang
if not has_flag('-x', cmd):
cmd.extend(['-x', compile_lang])
if not has_flag('--target', cmd) and action.target != "":
cmd.append("--target=" + action.target)
if not has_flag('-std', cmd) and not has_flag('--std', cmd):
cmd.append(action.compiler_standard)
cmd.extend(prepend_all('-isystem', action.compiler_includes))
if source:
cmd.append(source)
return cmd, True
| def build_stat_coll_cmd(action, config, source):
"""
Build the statistics collector analysis command.
"""
cmd = [config.analyzer_binary, '-c', '-x', action.lang, '--analyze',
# Do not warn about the unused gcc/g++ arguments.
'-Qunused-arguments',
'--analyzer-output', 'text']
for plugin in config.analyzer_plugins:
cmd.extend(["-Xclang", "-plugin",
"-Xclang", "checkercfg",
"-Xclang", "-load",
"-Xclang", plugin])
cmd.extend(['-Xclang',
'-analyzer-opt-analyze-headers'])
cmd.extend(config.analyzer_extra_arguments)
cmd.extend(action.analyzer_options)
# Enable the statistics collector checkers only.
collector_checkers = []
for checker_name, _ in config.checks().items():
if SpecialReturnValueCollector.checker_collect in checker_name:
collector_checkers.append(checker_name)
if ReturnValueCollector.checker_collect in checker_name:
collector_checkers.append(checker_name)
if not collector_checkers:
LOG.debug('No available statistics collector checkers were found')
return [], False
for coll_check in collector_checkers:
cmd.extend(['-Xclang', '-analyzer-checker=' + coll_check])
compile_lang = action.lang
if not has_flag('-x', cmd):
cmd.extend(['-x', compile_lang])
if not has_flag('--target', cmd) and action.target != "":
cmd.append("--target={action.target}")
if not has_flag('-std', cmd) and not has_flag('--std', cmd):
cmd.append(action.compiler_standard)
cmd.extend(prepend_all('-isystem', action.compiler_includes))
if source:
cmd.append(source)
return cmd, True
|
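The command builder above calls two helpers, `has_flag` and `prepend_all`, whose definitions are not shown here. Below is a hedged sketch of plausible implementations, only to illustrate how the argument list is assembled; the actual CodeChecker helpers may differ.

```python
def has_flag(flag, cmd):
    """Return True if the command already contains the flag (bare or '=value' form)."""
    return any(arg == flag or arg.startswith(flag + "=") for arg in cmd)

def prepend_all(flag, values):
    """Interleave a flag before every value: ['-isystem', v1, '-isystem', v2, ...]."""
    out = []
    for value in values:
        out.extend([flag, value])
    return out

# usage sketch with made-up paths and target
cmd = ["clang", "-c"]
cmd.extend(prepend_all("-isystem", ["/usr/include/foo", "/opt/include"]))
if not has_flag("--target", cmd):
    cmd.append("--target=x86_64-linux-gnu")
print(cmd)
```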
41,176 | def _gen_gray_code(n: int):
"""Generate the Gray Code from 0 to 2^n-1.
Each iteration returns two elements. The first element is the decimal representation
of the gray code and the second one is the position of bits flipped for next gray code.
"""
gray_code = 0
for i in range(1, 2 ** n):
next_gray = i ^ (i >> 1)
bit_flip = int(np.log2(gray_code ^ next_gray))
yield gray_code, bit_flip
gray_code = next_gray
yield gray_code, int(np.log2(gray_code))
| def _gen_gray_code(n: int):
"""Generate the Gray Code from 0 to 2^n-1.
Each iteration returns two elements. The first element is the decimal representation
of the gray code and `bit_flip` is the position of the bit flipped for the next Gray code.
"""
gray_code = 0
for i in range(1, 2 ** n):
next_gray = i ^ (i >> 1)
bit_flip = int(np.log2(gray_code ^ next_gray))
yield gray_code, bit_flip
gray_code = next_gray
yield gray_code, int(np.log2(gray_code))
|
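A quick usage sketch of the generator above (assuming it is in scope together with numpy imported as np): for n = 2 it walks the 2-bit Gray sequence 00, 01, 11, 10 and reports which bit changes.
import numpy as np  # _gen_gray_code relies on np.log2

for gray, flip in _gen_gray_code(2):
    print(gray, flip)
# prints: 0 0, then 1 1, then 3 0, then 2 1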
45,350 | def _predict(
booster,
data,
**kwargs,
):
"""
Run distributed prediction with a trained booster on Ray backend.
Internally it runs `xgb.predict` on each worker for each row partition of `data`
and creates a Modin DataFrame with the prediction results.
Parameters
----------
booster : xgboost.Booster
A trained booster.
data : modin.experimental.xgboost.DMatrix
Input data used for prediction.
**kwargs : dict
Other parameters are the same as `xgboost.Booster.predict`.
Returns
-------
modin.pandas.DataFrame
Modin DataFrame with prediction results.
"""
s = time.time()
# Get metainfo from dmatrix
input_index, input_columns, row_lengths = data.data_metainfo
# Infer columns of result
def _get_num_columns(booster, n_features, **kwargs):
rng = np.random.RandomState(777)
test_data = rng.randn(1, n_features)
test_predictions = booster.predict(
xgb.DMatrix(test_data), validate_features=False, **kwargs
)
num_columns = (
test_predictions.shape[1] if len(test_predictions.shape) > 1 else 1
)
return num_columns
result_num_columns = _get_num_columns(booster, len(input_columns), **kwargs)
new_columns = list(range(result_num_columns))
# Put common data in object store
booster = ray.put(booster)
new_columns_ref = ray.put(new_columns)
prediction_refs = [
_map_predict.remote(booster, part, new_columns_ref, **kwargs)
for _, part in data.data
]
predictions = from_partitions(
prediction_refs,
0,
index=input_index,
columns=new_columns,
row_lengths=row_lengths,
column_widths=[len(new_columns)],
)
LOGGER.info(f"Prediction time: {time.time() - s} s")
return predictions
| def _predict(
booster,
data,
**kwargs,
):
"""
Run distributed prediction with a trained booster on Ray backend.
Internally it runs `xgb.predict` on each worker for each row partition of `data`
and creates a Modin DataFrame with the prediction results.
Parameters
----------
booster : xgboost.Booster
A trained booster.
data : modin.experimental.xgboost.DMatrix
Input data used for prediction.
**kwargs : dict
Other parameters are the same as `xgboost.Booster.predict`.
Returns
-------
modin.pandas.DataFrame
Modin DataFrame with prediction results.
"""
s = time.time()
# Get metadata from DMatrix
input_index, input_columns, row_lengths = data.data_metainfo
# Infer columns of result
def _get_num_columns(booster, n_features, **kwargs):
rng = np.random.RandomState(777)
test_data = rng.randn(1, n_features)
test_predictions = booster.predict(
xgb.DMatrix(test_data), validate_features=False, **kwargs
)
num_columns = (
test_predictions.shape[1] if len(test_predictions.shape) > 1 else 1
)
return num_columns
result_num_columns = _get_num_columns(booster, len(input_columns), **kwargs)
new_columns = list(range(result_num_columns))
# Put common data in object store
booster = ray.put(booster)
new_columns_ref = ray.put(new_columns)
prediction_refs = [
_map_predict.remote(booster, part, new_columns_ref, **kwargs)
for _, part in data.data
]
predictions = from_partitions(
prediction_refs,
0,
index=input_index,
columns=new_columns,
row_lengths=row_lengths,
column_widths=[len(new_columns)],
)
LOGGER.info(f"Prediction time: {time.time() - s} s")
return predictions
|
27,786 | def _parse_ini_file(path: Path) -> PARSE_RESULT:
"""Parses .ini files with expected pytest.ini sections
todo: investigate if tool:pytest should be added
"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
return None
| def _parse_ini_file(path: Path) -> PARSE_RESULT:
"""Parses .ini files with expected pytest.ini sections
TODO: Investigate if tool:pytest should be added.
"""
iniconfig = _parse_ini_config(path)
if "pytest" in iniconfig:
return dict(iniconfig["pytest"].items())
return None
|
13,416 | def test_13_verify_logs_collection_still_work_after_moving_the_system_dataset_to_the_second_pool(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_5'] = middlewared_log['output'].splitlines()[-1]
assert logs_data['middleware_log_4'] in middlewared_log['output'], str(middlewared_log['output'])
assert logs_data['middleware_log_4'] != logs_data['middleware_log_5']
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_5'] = journald_log['output'].splitlines()[-1]
assert logs_data['journald_log_4'] in journald_log['output'], str(journald_log['output'])
assert logs_data['journald_log_4'] != logs_data['journald_log_5']
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_5'] = syslog['output'].splitlines()[-1]
assert logs_data['syslog_4'] in syslog['output'], str(syslog['output'])
assert logs_data['syslog_4'] != logs_data['syslog_5']
| def test_13_verify_logs_after_sysds_is_moved_to_second_pool(logs_data):
cmd = "cat /var/log/middlewared.log"
middlewared_log = SSH_TEST(cmd, user, password, ip)
assert middlewared_log['result'] is True, str(middlewared_log)
logs_data['middleware_log_5'] = middlewared_log['output'].splitlines()[-1]
assert logs_data['middleware_log_4'] in middlewared_log['output'], str(middlewared_log['output'])
assert logs_data['middleware_log_4'] != logs_data['middleware_log_5']
cmd = "journalctl --no-page"
journald_log = SSH_TEST(cmd, user, password, ip)
assert journald_log['result'] is True, str(journald_log)
logs_data['journald_log_5'] = journald_log['output'].splitlines()[-1]
assert logs_data['journald_log_4'] in journald_log['output'], str(journald_log['output'])
assert logs_data['journald_log_4'] != logs_data['journald_log_5']
cmd = "cat /var/log/syslog"
syslog = SSH_TEST(cmd, user, password, ip)
assert syslog['result'] is True, str(syslog)
logs_data['syslog_5'] = syslog['output'].splitlines()[-1]
assert logs_data['syslog_4'] in syslog['output'], str(syslog['output'])
assert logs_data['syslog_4'] != logs_data['syslog_5']
|
13,908 | def parse_coverage(
lines: List[str],
*,
filename: str,
exclude_lines_by_pattern: Optional[str],
exclude_branches_by_pattern: Optional[str],
exclude_pattern_prefix: Optional[str],
flags: ParserFlags,
) -> FileCoverage:
"""
Extract coverage data from a gcov report.
Logging:
Parse problems are reported as warnings.
Coverage exclusion decisions are reported as verbose messages.
Arguments:
lines: the lines of the file to be parsed (excluding newlines)
filename: for error reports
exclude_lines_by_pattern: string with regex syntax to exclude
individual lines
exclude_branches_by_pattern: string with regex syntax to exclude
individual branches
exclude_pattern_prefix: string with prefix for _LINE/_START/_STOP markers.
flags: various choices for the parser behavior
Returns:
the coverage data
Raises:
Any exceptions during parsing, unless `ParserFlags.IGNORE_PARSE_ERRORS`
is enabled.
"""
context = _Context(flags, filename)
lines_with_errors: List[_LineWithError] = []
tokenized_lines: List[Tuple[_Line, str]] = []
for raw_line in lines:
# empty lines shouldn't occur in reality, but are common in testing
if not raw_line:
continue
try:
tokenized_lines.append((_parse_line(raw_line), raw_line))
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
if (
flags & ParserFlags.RESPECT_EXCLUSION_MARKERS
or flags & ParserFlags.PARSE_DECISIONS
):
src_lines = [
(line.lineno, line.source_code)
for line, _ in tokenized_lines
if isinstance(line, _SourceLine)
]
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS:
[line_is_excluded, branch_is_excluded] = _find_excluded_ranges(
lines=src_lines,
warnings=_ExclusionRangeWarnings(filename),
exclude_lines_by_pattern=exclude_lines_by_pattern,
exclude_branches_by_pattern=exclude_branches_by_pattern,
exclude_pattern_prefix=exclude_pattern_prefix,
)
else:
line_is_excluded = _make_is_in_any_range([])
branch_is_excluded = _make_is_in_any_range([])
coverage = FileCoverage(filename)
state = _ParserState()
for line, raw_line in tokenized_lines:
try:
state = _gather_coverage_from_line(
state,
line,
coverage=coverage,
line_is_excluded=line_is_excluded,
branch_is_excluded=branch_is_excluded,
context=context,
)
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
state = _ParserState(is_recovering=True)
# Clean up the final state. This shouldn't happen,
# but the last line could theoretically contain pending function lines
for function in state.deferred_functions:
_add_coverage_for_function(coverage, state.lineno + 1, function, context)
if flags & ParserFlags.PARSE_DECISIONS:
decision_parser = DecisionParser(coverage, src_lines)
decision_parser.parse_all_lines()
_report_lines_with_errors(lines_with_errors, context)
return coverage
| def parse_coverage(
lines: List[str],
*,
filename: str,
exclude_lines_by_pattern: Optional[str],
exclude_branches_by_pattern: Optional[str],
exclude_pattern_prefix: Optional[str],
flags: ParserFlags,
) -> FileCoverage:
"""
Extract coverage data from a gcov report.
Logging:
Parse problems are reported as warnings.
Coverage exclusion decisions are reported as verbose messages.
Arguments:
lines: the lines of the file to be parsed (excluding newlines)
filename: for error reports
exclude_lines_by_pattern: string with regex syntax to exclude
individual lines
exclude_branches_by_pattern: string with regex syntax to exclude
individual branches
exclude_pattern_prefix: string with prefix for _LINE/_START/_STOP markers.
flags: various choices for the parser behavior
Returns:
the coverage data
Raises:
Any exceptions during parsing, unless `ParserFlags.IGNORE_PARSE_ERRORS`
is enabled.
"""
context = _Context(flags, filename)
lines_with_errors: List[_LineWithError] = []
tokenized_lines: List[Tuple[_Line, str]] = []
for raw_line in lines:
# empty lines shouldn't occur in reality, but are common in testing
if not raw_line:
continue
try:
tokenized_lines.append((_parse_line(raw_line), raw_line))
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
if (
flags & ParserFlags.RESPECT_EXCLUSION_MARKERS
or flags & ParserFlags.PARSE_DECISIONS
):
src_lines = [
(line.lineno, line.source_code)
for line, _ in tokenized_lines
if isinstance(line, _SourceLine)
]
if flags & ParserFlags.RESPECT_EXCLUSION_MARKERS:
line_is_excluded, branch_is_excluded = _find_excluded_ranges(
lines=src_lines,
warnings=_ExclusionRangeWarnings(filename),
exclude_lines_by_pattern=exclude_lines_by_pattern,
exclude_branches_by_pattern=exclude_branches_by_pattern,
exclude_pattern_prefix=exclude_pattern_prefix,
)
else:
line_is_excluded = _make_is_in_any_range([])
branch_is_excluded = _make_is_in_any_range([])
coverage = FileCoverage(filename)
state = _ParserState()
for line, raw_line in tokenized_lines:
try:
state = _gather_coverage_from_line(
state,
line,
coverage=coverage,
line_is_excluded=line_is_excluded,
branch_is_excluded=branch_is_excluded,
context=context,
)
except Exception as ex: # pylint: disable=broad-except
lines_with_errors.append((raw_line, ex))
state = _ParserState(is_recovering=True)
# Clean up the final state. This shouldn't happen,
# but the last line could theoretically contain pending function lines
for function in state.deferred_functions:
_add_coverage_for_function(coverage, state.lineno + 1, function, context)
if flags & ParserFlags.PARSE_DECISIONS:
decision_parser = DecisionParser(coverage, src_lines)
decision_parser.parse_all_lines()
_report_lines_with_errors(lines_with_errors, context)
return coverage
|
13,742 | def _enqueue_recompute_grades_task(course_key, grading_policy_hash=None):
kwargs = {
'course_key': six.text_type(course_key),
'event_transaction_id': six.text_type(get_event_transaction_id()),
'event_transaction_type': six.text_type(get_event_transaction_type()),
}
if grading_policy_hash is not None:
kwargs['grading_policy_hash'] = six.text_type(grading_policy_hash)
result = task_compute_all_grades_for_course.apply_async(kwargs=kwargs, countdown=GRADING_POLICY_COUNTDOWN_SECONDS)
log.info(u"Grades: Created {task_name}[{task_id}] with arguments {kwargs}".format(
task_name=task_compute_all_grades_for_course.name,
task_id=result.task_id,
kwargs=kwargs,
))
| def _enqueue_recompute_grades_task(course_key, grading_policy_hash=None):
kwargs = {
'course_key': str(course_key),
'event_transaction_id': six.text_type(get_event_transaction_id()),
'event_transaction_type': six.text_type(get_event_transaction_type()),
}
if grading_policy_hash is not None:
kwargs['grading_policy_hash'] = six.text_type(grading_policy_hash)
result = task_compute_all_grades_for_course.apply_async(kwargs=kwargs, countdown=GRADING_POLICY_COUNTDOWN_SECONDS)
log.info(u"Grades: Created {task_name}[{task_id}] with arguments {kwargs}".format(
task_name=task_compute_all_grades_for_course.name,
task_id=result.task_id,
kwargs=kwargs,
))
|
54,082 | def _get_maintenance_config(cmd, client, file_path):
# get models
MaintenanceConfiguration = cmd.get_models('MaintenanceConfiguration', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
TimeInWeek = cmd.get_models('TimeInWeek', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
TimeSpan = cmd.get_models('TimeSpan', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
maintenance_config = get_file_json(file_path)
logger.info(maintenance_config)
result = client._deserialize('MaintenanceConfiguration', maintenance_config)
logger.info(result)
return result
| def _get_maintenance_config(cmd, file_path):
maintenance_config = get_file_json(file_path)
return maintenance_config
# get models
MaintenanceConfiguration = cmd.get_models('MaintenanceConfiguration', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
TimeInWeek = cmd.get_models('TimeInWeek', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
TimeSpan = cmd.get_models('TimeSpan', resource_type=CUSTOM_MGMT_AKS_PREVIEW, operation_group='maintenance_configurations')
maintenance_config = get_file_json(file_path)
logger.info(maintenance_config)
result = client._deserialize('MaintenanceConfiguration', maintenance_config)
logger.info(result)
return result
|
43,928 | def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree t, :math:`E` denotes the expansion
coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overlap
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
and :math:`E_t^{ij} = 0` if :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
ra (float): position component of the first Gaussian function
rb (float): position component of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t(integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
elif t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
| def expansion(la, lb, ra, rb, alpha, beta, t):
r"""Compute Hermite Gaussian expansion coefficients recursively for two Gaussian functions.
An overlap distribution, which defines the product of two Gaussians, can be written as a Hermite
expansion as [`Helgaker (1995) p798 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
\Omega_{ij} = \sum_{t=0}^{i+j} E_t^{ij} \Lambda_t,
where :math:`\Lambda` is a Hermite polynomial of degree t, :math:`E` denotes the expansion
coefficients, :math:`\Omega_{ij} = G_i G_j` and :math:`G` is a Gaussian function. The overlap
integral between two Gaussian functions can be simply computed by integrating over the overlap
distribution which requires obtaining the expansion coefficients. This can be done recursively
as [`Helgaker (1995) p799 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
E_t^{i+1,j} = \frac{1}{2p} E_{t-1}^{ij} - \frac{qr}{\alpha} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
and
.. math::
E_t^{i,j+1} = \frac{1}{2p} E_{t-1}^{ij} + \frac{qr}{\beta} E_{t}^{ij} + (t+1) E_{t+1}^{ij},
where :math:`p = \alpha + \beta` and :math:`q = \alpha \beta / (\alpha + \beta)` are computed
from the Gaussian exponents :math:`\alpha, \beta` and the position :math:`r` is computed as
:math:`r = r_\alpha - r_\beta`. The starting coefficient is
.. math::
E_0^{00} = e^{-qr^2},
and :math:`E_t^{ij} = 0` if :math:`t < 0` or :math:`t > (i+j)`.
Args:
la (integer): angular momentum component for the first Gaussian function
lb (integer): angular momentum component for the second Gaussian function
ra (float): position component of the first Gaussian function
rb (float): position component of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
t(integer): number of nodes in the Hermite Gaussian
Returns:
array[float]: expansion coefficients for each Gaussian combination
**Example**
>>> la, lb = 0, 0
>>> ra, rb = 0.0, 0.0
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> t = 0
>>> c = expansion(la, lb, ra, rb, alpha, beta, t)
>>> c
array([1.])
"""
p = alpha + beta
q = alpha * beta / p
r = ra - rb
if la == lb == t == 0:
return anp.exp(-q * r ** 2)
if t < 0 or t > (la + lb):
return 0.0
elif lb == 0:
return (
(1 / (2 * p)) * expansion(la - 1, lb, ra, rb, alpha, beta, t - 1)
- (q * r / alpha) * expansion(la - 1, lb, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la - 1, lb, ra, rb, alpha, beta, t + 1)
)
else:
return (
(1 / (2 * p)) * expansion(la, lb - 1, ra, rb, alpha, beta, t - 1)
+ (q * r / beta) * expansion(la, lb - 1, ra, rb, alpha, beta, t)
+ (t + 1) * expansion(la, lb - 1, ra, rb, alpha, beta, t + 1)
)
|
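As a worked follow-up to the docstring above: only the t = 0 Hermite term survives integration, so one Cartesian factor of the s-s overlap is E_0^{00} * sqrt(pi / p). A hedged sketch, assuming `anp` is autograd's numpy as used in the function:
from autograd import numpy as anp  # assumed to match the `anp` used above

alpha = anp.array([3.42525091])
beta = anp.array([3.42525091])
p = alpha + beta
s_x = expansion(0, 0, 0.0, 0.0, alpha, beta, 0) * anp.sqrt(anp.pi / p)
# s_x is approximately array([0.677]) for these exponents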
24,594 | def thermal_speed_coefficients(method: str, ndim: int) -> float:
r"""
Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}`
based on the given ``method`` and ``ndim``. (See the
`~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes <thermal-speed-notes>`
section for further details.)
Parameters
----------
method : `str`
Method to be used for calculating the thermal speed. Valid values are
``"most_probable"``, ``"rms"``, ``"mean_magnitude"``, and ``"nrl"``.
ndim : `int`
Dimensionality (1D, 2D, 3D) of space in which to calculate thermal
speed. Valid values are ``1``, ``2``, or ``3``.
Raises
------
`ValueError`
If ``method`` or ``ndim`` are not a valid value.
Notes
-----
For a detailed explanation of the different coefficients used to calculate
the thermal speed, see the :ref:`Notes <thermal-speed-notes>` section
for `~plasmapy.formulary.parameters.thermal_speed`. The possible return
values are listed the table
.. table:: Thermal speed :math:`v_{th}` coefficients.
:widths: 2 1 1 1 1
:width: 100%
+--------------+------------+---------------+---------------+---------------+
| ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` |
+--------------+------------+---------------+---------------+---------------+
| ``"most_probable"`` | .. math:: | .. math:: | .. math:: |
| | 0 | 1 | \sqrt{2} |
+--------------+------------+---------------+---------------+---------------+
| ``"rms"`` | .. math:: | .. math:: | .. math:: |
| | 1 | \sqrt{2} | \sqrt{3} |
+--------------+------------+---------------+---------------+---------------+
| ``"mean_magnitude"`` | .. math:: | .. math:: | .. math:: |
| | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} |
+--------------+------------+---------------+---------------+---------------+
| ``"nrl"`` | .. math:: |
| | 1 |
+--------------+------------+---------------+---------------+---------------+
Examples
--------
>>> thermal_speed_coefficients(method="most_probable", ndim=3)
1.414213...
"""
_coefficients = {
(1, "most_probable"): 0,
(2, "most_probable"): 1,
(3, "most_probable"): np.sqrt(2),
(1, "rms"): 1,
(2, "rms"): np.sqrt(2),
(3, "rms"): np.sqrt(3),
(1, "mean_magnitude"): np.sqrt(2 / np.pi),
(2, "mean_magnitude"): np.sqrt(np.pi / 2),
(3, "mean_magnitude"): np.sqrt(8 / np.pi),
(1, "nrl"): 1,
(2, "nrl"): 1,
(3, "nrl"): 1,
}
try:
coeff = _coefficients[(ndim, method)]
except KeyError:
raise ValueError(
f"Value for (ndim, method) pair not valid, got '({ndim}, {method})'."
)
return coeff
| def thermal_speed_coefficients(method: str, ndim: int) -> float:
r"""
Get the appropriate coefficient for calculating the thermal speed :math:`v_{th}`
based on the given ``method`` and ``ndim``. (See the
`~plasmapy.formulary.parameters.thermal_speed` :ref:`Notes <thermal-speed-notes>`
section for further details.)
Parameters
----------
method : `str`
Method to be used for calculating the thermal speed. Valid values are
``"most_probable"``, ``"rms"``, ``"mean_magnitude"``, and ``"nrl"``.
ndim : `int`
Dimensionality (1D, 2D, 3D) of space in which to calculate thermal
speed. Valid values are ``1``, ``2``, or ``3``.
Raises
------
`ValueError`
If ``method`` or ``ndim`` are not a valid value.
Notes
-----
For a detailed explanation of the different coefficients used to calculate
the thermal speed, see the :ref:`Notes <thermal-speed-notes>` section
for `~plasmapy.formulary.parameters.thermal_speed`. The possible return
values are listed in the following table:
.. table:: Thermal speed :math:`v_{th}` coefficients.
:widths: 2 1 1 1 1
:width: 100%
+--------------+------------+---------------+---------------+---------------+
| ↓ **method** | **ndim** → | ``1`` | ``2`` | ``3`` |
+--------------+------------+---------------+---------------+---------------+
| ``"most_probable"`` | .. math:: | .. math:: | .. math:: |
| | 0 | 1 | \sqrt{2} |
+--------------+------------+---------------+---------------+---------------+
| ``"rms"`` | .. math:: | .. math:: | .. math:: |
| | 1 | \sqrt{2} | \sqrt{3} |
+--------------+------------+---------------+---------------+---------------+
| ``"mean_magnitude"`` | .. math:: | .. math:: | .. math:: |
| | \sqrt{2/π} | \sqrt{π/2} | \sqrt{8/π} |
+--------------+------------+---------------+---------------+---------------+
| ``"nrl"`` | .. math:: |
| | 1 |
+--------------+------------+---------------+---------------+---------------+
Examples
--------
>>> thermal_speed_coefficients(method="most_probable", ndim=3)
1.414213...
"""
_coefficients = {
(1, "most_probable"): 0,
(2, "most_probable"): 1,
(3, "most_probable"): np.sqrt(2),
(1, "rms"): 1,
(2, "rms"): np.sqrt(2),
(3, "rms"): np.sqrt(3),
(1, "mean_magnitude"): np.sqrt(2 / np.pi),
(2, "mean_magnitude"): np.sqrt(np.pi / 2),
(3, "mean_magnitude"): np.sqrt(8 / np.pi),
(1, "nrl"): 1,
(2, "nrl"): 1,
(3, "nrl"): 1,
}
try:
coeff = _coefficients[(ndim, method)]
except KeyError:
raise ValueError(
f"Value for (ndim, method) pair not valid, got '({ndim}, {method})'."
)
return coeff
|
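A hedged sketch of how the coefficient is typically applied (the wrapper below is hypothetical; it mirrors the v_th = C * sqrt(k_B * T / m) convention referenced in the thermal_speed notes):
import numpy as np

k_B = 1.380649e-23  # Boltzmann constant in J/K

def thermal_speed_sketch(T_kelvin, mass_kg, method="most_probable", ndim=3):
    # hypothetical helper: scale sqrt(k_B * T / m) by the tabulated coefficient
    return thermal_speed_coefficients(method=method, ndim=ndim) * np.sqrt(k_B * T_kelvin / mass_kg)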
41,696 | def unpack_buffer(
buffer: JsProxy,
*,
filename: str = "",
format: str = None,
target: Literal["site", "lib", None] = None,
extract_dir: str = None,
calculate_dynlibs: bool = False,
) -> Optional[JsProxy]:
"""Used to install a package either into sitepackages or into the standard
library.
This is a helper method called from ``loadPackage``.
Parameters
----------
buffer
A Javascript ``Uint8Array`` with the binary data for the archive.
filename
The name of the file we are extracting. We only care about it to figure
out whether the buffer represents a tar file or a zip file. Ignored if
format argument is present.
format
Controls the format that we assume the archive has. Overrides the file
extension of filename.
extract_dir
Controls which directory the file is unpacked into. Default is the
working directory. Mutually exclusive with target.
target
Controls which directory the file is unpacked into. Either "site" which
unpacks the file into the sitepackages directory or "lib" which
unpacks the file into the standard library. Mutually exclusive with
extract_dir.
calculate_dynlibs
If true, will return a Javascript Array of paths to dynamic libraries
('.so' files) that were in the archive. We need to precompile these Wasm
binaries in `load-pyodide.js`. These paths point to the unpacked
locations of the .so files.
Returns
-------
If calculate_dynlibs is True, a Javascript Array of dynamic libraries.
Otherwise, return None.
"""
if format:
format = get_format(format)
if target and extract_dir:
raise ValueError("Cannot provide both 'target' and 'extract_dir'")
if not filename and not format:
raise ValueError("At least one of filename and format must be provided")
if target:
extract_path = TARGETS[target]
elif extract_dir:
extract_path = Path(extract_dir)
else:
extract_path = Path(".")
with NamedTemporaryFile(suffix=filename) as f:
buffer._into_file(f)
shutil.unpack_archive(f.name, extract_path, format)
if calculate_dynlibs:
return to_js(get_dynlibs(f, extract_path))
else:
return None
| def unpack_buffer(
buffer: JsProxy,
*,
filename: str = "",
format: str = None,
target: Literal["site", "lib", None] = None,
extract_dir: str = None,
calculate_dynlibs: bool = False,
) -> Optional[JsProxy]:
"""Used to install a package either into sitepackages or into the standard
library.
This is a helper method called from ``loadPackage``.
Parameters
----------
buffer
A Javascript ``Uint8Array`` with the binary data for the archive.
filename
The name of the file we are extracting. We only care about it to figure
out whether the buffer represents a tar file or a zip file. Ignored if
format argument is present.
format
Controls the format that we assume the archive has. Overrides the file
extension of filename.
extract_dir
Controls which directory the file is unpacked into. Default is the
working directory. Mutually exclusive with target.
target
Controls which directory the file is unpacked into. Either "site" which
unpacks the file into the sitepackages directory or "lib" which
unpacks the file into the standard library. Mutually exclusive with
extract_dir.
calculate_dynlibs
If true, will return a Javascript Array of paths to dynamic libraries
('.so' files) that were in the archive. We need to precompile these Wasm
binaries in `load-pyodide.js`. These paths point to the unpacked
locations of the .so files.
Returns
-------
If calculate_dynlibs is True, a Javascript Array of dynamic libraries.
Otherwise, return None.
"""
if format:
format = get_format(format)
if target and extract_dir:
raise ValueError("Cannot provide both 'target' and 'extract_dir'")
if not filename and format is None:
raise ValueError("At least one of filename and format must be provided")
if target:
extract_path = TARGETS[target]
elif extract_dir:
extract_path = Path(extract_dir)
else:
extract_path = Path(".")
with NamedTemporaryFile(suffix=filename) as f:
buffer._into_file(f)
shutil.unpack_archive(f.name, extract_path, format)
if calculate_dynlibs:
return to_js(get_dynlibs(f, extract_path))
else:
return None
|
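Outside the Pyodide-specific buffer handling, the core step of the helper above is a plain shutil.unpack_archive call; a minimal sketch with illustrative paths:
import shutil
from pathlib import Path

# extract an archive into a target directory; format=None lets shutil infer it from the suffix
shutil.unpack_archive('package-1.0.tar.gz', Path('site-packages'), format=None)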
2,155 | def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axix on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis : int (either 0 or 1)
Axis along which the mean and variance should be computed.
weights : ndarray, shape (n_samples,) or (n_features,) | None
if axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
return_sum_weights : bool
If True, returns the sum of weights seen for each feature
if axis=0 or each sample if axis=1.
Returns
-------
means : float array with shape (n_features,)
Feature-wise means
variances : float array with shape (n_features,)
Feature-wise variances
sum_weights : float array with shape (n_features,)
Returned if return_sum_weights is True.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csc_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csr_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights)
else:
_raise_typeerror(X)
| def mean_variance_axis(X, axis, weights=None, return_sum_weights=False):
"""Compute mean and variance along an axix on a CSR or CSC matrix
Parameters
----------
X : CSR or CSC sparse matrix, shape (n_samples, n_features)
Input data.
axis : int (either 0 or 1)
Axis along which the mean and variance should be computed.
weights : ndarray, shape (n_samples,) or (n_features,) | None
if axis is set to 0 shape is (n_samples,) or
if axis is set to 1 shape is (n_features,).
If it is set to None, then samples are equally weighted.
return_sum_weights : bool, default=False
If True, returns the sum of weights seen for each feature
if axis=0 or each sample if axis=1.
Returns
-------
means : float array with shape (n_features,)
Feature-wise means
variances : float array with shape (n_features,)
Feature-wise variances
sum_weights : float array with shape (n_features,)
Returned if return_sum_weights is True.
"""
_raise_error_wrong_axis(axis)
if isinstance(X, sp.csr_matrix):
if axis == 0:
return _csr_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csc_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights)
elif isinstance(X, sp.csc_matrix):
if axis == 0:
return _csc_mean_var_axis0(
X, weights=weights, return_sum_weights=return_sum_weights)
else:
return _csr_mean_var_axis0(
X.T, weights=weights, return_sum_weights=return_sum_weights)
else:
_raise_typeerror(X)
|
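A minimal usage sketch, assuming this helper is importable as sklearn.utils.sparsefuncs.mean_variance_axis (the import path is an assumption):
import numpy as np
import scipy.sparse as sp
from sklearn.utils.sparsefuncs import mean_variance_axis  # assumed import path

X = sp.csr_matrix(np.array([[0., 1.], [2., 3.]]))
means, variances = mean_variance_axis(X, axis=0)
# means -> array([1., 2.]), variances -> array([1., 1.])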
22,443 | def arg_parser():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-u', '--galaxy-url', default="http://localhost:8080", help='Galaxy URL')
parser.add_argument('-k', '--key', default=None, help='Galaxy User API Key')
parser.add_argument('-a', '--admin-key', default=None, help='Galaxy Admin API Key')
parser.add_argument('--force_path_paste', default=False, action="store_true", help='This requires Galaxy-side config option "allow_path_paste" enabled. Allows for fetching test data locally. Only for admins.')
parser.add_argument('-t', '--tool-id', default=ALL_TOOLS, help='Tool ID')
parser.add_argument('--tool-version', default=None, help='Tool Version (if tool id supplied). Defaults to just latest version, use * to test all versions')
parser.add_argument('-i', '--test-index', default=ALL_TESTS, type=int, help='Tool Test Index (starting at 0) - by default all tests will run.')
parser.add_argument('-o', '--output', default=None, help='directory to dump outputs to')
parser.add_argument('--append', default=False, action="store_true", help="Extend a test record json (created with --output-json) with additional tests.")
skip_group = parser.add_mutually_exclusive_group()
skip_group.add_argument('--skip-previously-executed', dest="skip", default="no", action="store_const", const="executed", help="When used with --append, skip any test previously executed.")
skip_group.add_argument('--skip-previously-successful', dest="skip", default="no", action="store_const", const="successful", help="When used with --append, skip any test previously executed successfully.")
parser.add_argument('-j', '--output-json', default=None, help='output metadata json')
parser.add_argument('--verbose', default=False, action="store_true", help="Verbose logging.")
parser.add_argument('-c', '--client-test-config', default=None, help="Test config YAML to help with client testing")
parser.add_argument('--suite-name', default=DEFAULT_SUITE_NAME, help="Suite name for tool test output")
parser.add_argument('--with-reference-data', dest="with_reference_data", default=False, action="store_true")
parser.add_argument('--skip-with-reference-data', dest="with_reference_data", action="store_false", help="Skip tests the Galaxy server believes use data tables or loc files.")
history_per_group = parser.add_mutually_exclusive_group()
history_per_group.add_argument('--history-per-suite', dest="history_per_test_case", default=False, action="store_false", help="Create new history per test suite (all tests in same history).")
history_per_group.add_argument('--history-per-test-case', dest="history_per_test_case", action="store_true", help="Create new history per test case.")
history_per_group.add_argument('--history-name', default=None, help="Override default history name")
parser.add_argument('--no-history-reuse', default=False, action="store_true", help="Do not reuse histories if a matchine one already exists.")
parser.add_argument('--no-history-cleanup', default=False, action="store_true", help="Preserve histories created for testing.")
parser.add_argument('--publish-history', default=False, action="store_true", help="Publish test history. Useful for CI testing.")
parser.add_argument('--parallel-tests', default=1, type=int, help="Parallel tests.")
parser.add_argument('--retries', default=0, type=int, help="Retry failed tests.")
parser.add_argument('--page-size', default=0, type=int, help="If positive, use pagination and just run one 'page' of tool tests.")
parser.add_argument('--page-number', default=0, type=int, help="If page size is used, run this 'page' of tests - starts with 0.")
parser.add_argument('--download-attempts', default=1, type=int, help="Galaxy may return a transient 500 status code for download if test results are written but not yet accessible.")
parser.add_argument('--download-sleep', default=1, type=int, help="If download attempts is greater than 1, the amount to sleep between download attempts.")
parser.add_argument('--test-data', action='append', help='Add local test data path to search for missing test data')
return parser
| def arg_parser():
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('-u', '--galaxy-url', default="http://localhost:8080", help='Galaxy URL')
parser.add_argument('-k', '--key', default=None, help='Galaxy User API Key')
parser.add_argument('-a', '--admin-key', default=None, help='Galaxy Admin API Key')
parser.add_argument('--force_path_paste', default=False, action="store_true", help='This requires Galaxy-side config option "allow_path_paste" enabled. Allows for fetching test data locally. Only for admins.')
parser.add_argument('-t', '--tool-id', default=ALL_TOOLS, help='Tool ID')
parser.add_argument('--tool-version', default=None, help='Tool Version (if tool id supplied). Defaults to just latest version, use * to test all versions')
parser.add_argument('-i', '--test-index', default=ALL_TESTS, type=int, help='Tool Test Index (starting at 0) - by default all tests will run.')
parser.add_argument('-o', '--output', default=None, help='directory to dump outputs to')
parser.add_argument('--append', default=False, action="store_true", help="Extend a test record json (created with --output-json) with additional tests.")
skip_group = parser.add_mutually_exclusive_group()
skip_group.add_argument('--skip-previously-executed', dest="skip", default="no", action="store_const", const="executed", help="When used with --append, skip any test previously executed.")
skip_group.add_argument('--skip-previously-successful', dest="skip", default="no", action="store_const", const="successful", help="When used with --append, skip any test previously executed successfully.")
parser.add_argument('-j', '--output-json', default=None, help='output metadata json')
parser.add_argument('--verbose', default=False, action="store_true", help="Verbose logging.")
parser.add_argument('-c', '--client-test-config', default=None, help="Test config YAML to help with client testing")
parser.add_argument('--suite-name', default=DEFAULT_SUITE_NAME, help="Suite name for tool test output")
parser.add_argument('--with-reference-data', dest="with_reference_data", default=False, action="store_true")
parser.add_argument('--skip-with-reference-data', dest="with_reference_data", action="store_false", help="Skip tests the Galaxy server believes use data tables or loc files.")
history_per_group = parser.add_mutually_exclusive_group()
history_per_group.add_argument('--history-per-suite', dest="history_per_test_case", default=False, action="store_false", help="Create new history per test suite (all tests in same history).")
history_per_group.add_argument('--history-per-test-case', dest="history_per_test_case", action="store_true", help="Create new history per test case.")
history_per_group.add_argument('--history-name', default=None, help="Override default history name")
parser.add_argument('--no-history-reuse', default=False, action="store_true", help="Do not reuse histories if a matching one already exists.")
parser.add_argument('--no-history-cleanup', default=False, action="store_true", help="Preserve histories created for testing.")
parser.add_argument('--publish-history', default=False, action="store_true", help="Publish test history. Useful for CI testing.")
parser.add_argument('--parallel-tests', default=1, type=int, help="Parallel tests.")
parser.add_argument('--retries', default=0, type=int, help="Retry failed tests.")
parser.add_argument('--page-size', default=0, type=int, help="If positive, use pagination and just run one 'page' of tool tests.")
parser.add_argument('--page-number', default=0, type=int, help="If page size is used, run this 'page' of tests - starts with 0.")
parser.add_argument('--download-attempts', default=1, type=int, help="Galaxy may return a transient 500 status code for download if test results are written but not yet accessible.")
parser.add_argument('--download-sleep', default=1, type=int, help="If download attempts is greater than 1, the amount to sleep between download attempts.")
parser.add_argument('--test-data', action='append', help='Add local test data path to search for missing test data')
return parser
|
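A short sanity check of the parser built above (the argument values are made up):
parser = arg_parser()
args = parser.parse_args(['-u', 'http://localhost:8080', '-t', 'my_tool', '--parallel-tests', '4'])
# args.galaxy_url == 'http://localhost:8080', args.tool_id == 'my_tool', args.parallel_tests == 4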
42,678 | def _populate_db_with_balances(connection, ts: Timestamp):
cursor = connection.cursor()
cursor.execute('INSERT OR IGNORE INTO assets(identifier) VALUES(?)', (NFT_TOKEN_ID,))
cursor.execute(
"""
INSERT INTO "timed_balances" ("category", "time", "currency", "amount", "usd_value") VALUES
(?, ?, ?, ?, ?);
""", ('A', ts, NFT_TOKEN_ID, '1.00', '178.44'),
)
cursor.execute(
"""
INSERT INTO "timed_balances" ("category", "time", "currency", "amount", "usd_value") VALUES
(?, ?, ?, ?, ?);
""", ('A', ts, 'AVAX', '1.00', '87'),
)
connection.commit()
| def _populate_db_with_balances(connection, ts: Timestamp):
cursor = connection.cursor()
cursor.execute('INSERT OR IGNORE INTO assets(identifier) VALUES(?)', (NFT_TOKEN_ID,))
cursor.execute(
"""
INSERT INTO timed_balances ("category", "time", "currency", "amount", "usd_value") VALUES
(?, ?, ?, ?, ?);
""", ('A', ts, NFT_TOKEN_ID, '1.00', '178.44'),
)
cursor.execute(
"""
INSERT INTO "timed_balances" ("category", "time", "currency", "amount", "usd_value") VALUES
(?, ?, ?, ?, ?);
""", ('A', ts, 'AVAX', '1.00', '87'),
)
connection.commit()
|
31,753 | def collect_entries_data_from_response(parsed_feed_data: FeedParserDict) -> List[Dict[str, Any]]:
"""Collects relevant data from the parsed RSS feed entries.
Args:
parsed_feed_data (FeedParserDict): Parsed RSS feed data.
Returns:
List[Dict[str, Any]]: The data from the RSS feed relevant for the widget.
"""
entries_data: List[Dict[str, Any]] = []
if not parsed_feed_data:
raise DemistoException("Could not parse feed data.")
for entry in reversed(parsed_feed_data.entries):
if entry:
published = email.utils.parsedate(entry.published)
if not published:
continue
published_dt = datetime.fromtimestamp(mktime(published))
published_formatted = published_dt.strftime('%B %-d, %Y %-I:%M %p')
entries_data.append(
{
'timestamp': published_formatted,
'link': entry.get('link'),
'title': entry.get('title'),
'summary': entry.get('summary')
}
)
return entries_data
| def collect_entries_data_from_response(parsed_feed_data: FeedParserDict) -> List[Dict[str, Any]]:
"""Collects relevant data from the parsed RSS feed entries.
Args:
parsed_feed_data (FeedParserDict): Parsed RSS feed data.
Returns:
List[Dict[str, Any]]: The data from the RSS feed relevant for the widget.
"""
entries_data: List[Dict[str, Any]] = []
if not parsed_feed_data:
raise DemistoException("Could not parse feed data.")
for entry in reversed(parsed_feed_data.entries):
if entry:
published = email.utils.parsedate(entry.published)
if not published:
continue
published_dt = datetime.fromtimestamp(mktime(published))
published_formatted = published_dt.strftime('%B %-d, %Y %-I:%M %p')
entries_data.append(
{
'timestamp': published_formatted,
'link': entry.get('link'),
'title': entry.get('title'),
'summary': entry.get('summary'),
}
)
return entries_data
|
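The timestamp handling above can be exercised in isolation (the RFC 2822 date string is made up; note that '%-d' and '%-I' are POSIX-only strftime codes):
import email.utils
from time import mktime
from datetime import datetime

published = email.utils.parsedate('Tue, 05 Mar 2024 10:30:00 GMT')
published_dt = datetime.fromtimestamp(mktime(published))
print(published_dt.strftime('%B %-d, %Y %-I:%M %p'))  # e.g. 'March 5, 2024 10:30 AM' in local time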
32,051 | def relationships_manager(client: Client, entity_a: str, entity_a_type: str, indicator_type: str,
indicator: str, field_for_passive_dns_rs: str, feed_indicator_type_for_passive_dns_rs: str):
"""
manage the relationships creation
Args:
client: Client object with request
entity_a: str the first entity of the relationship
entity_a_type: str the type of the first entity
indicator_type: str the indicator type to get the related information by
indicator: str the indicator to get the related information by
:returns:
a list of the relationships that were created
"""
relationships: list = []
if client.max_indicator_relationships != 0:
params = {'limit': str(client.max_indicator_relationships)}
_, _, urls_raw_response = alienvault_get_related_urls_by_indicator_command(client, indicator_type, indicator, params)
urls_raw_response = delete_duplicated_relationships(dict_safe_get(urls_raw_response, ['url_list'], ['']), 'url')
relationships += create_relationships(client, urls_raw_response, entity_a, entity_a_type, 'url', FeedIndicatorType.URL)
_, _, hash_raw_response = alienvault_get_related_hashes_by_indicator_command(client, indicator_type, indicator, params)
hash_raw_response = delete_duplicated_relationships(dict_safe_get(hash_raw_response, ['data'], ['']), 'hash')
relationships += create_relationships(client, hash_raw_response, entity_a, entity_a_type, 'hash', FeedIndicatorType.File)
_, _, passive_dns_raw_response = alienvault_get_passive_dns_data_by_indicator_command(client, indicator_type,
indicator, params)
if len(dict_safe_get(passive_dns_raw_response, ['passive_dns'], [''])) > client.max_indicator_relationships:
passive_dns_raw_response = delete_duplicated_relationships(passive_dns_raw_response.get('passive_dns')
[0:client.max_indicator_relationships],
field_for_passive_dns_rs)
else:
passive_dns_raw_response = delete_duplicated_relationships(dict_safe_get(passive_dns_raw_response, ['passive_dns'],
['']), field_for_passive_dns_rs)
passive_dns_raw_response = validate_string_is_not_url(passive_dns_raw_response, field_for_passive_dns_rs)
relationships += create_relationships(client, passive_dns_raw_response, entity_a,
entity_a_type, field_for_passive_dns_rs, feed_indicator_type_for_passive_dns_rs)
return relationships
| def relationships_manager(client: Client, entity_a: str, entity_a_type: str, indicator_type: str,
indicator: str, field_for_passive_dns_rs: str, feed_indicator_type_for_passive_dns_rs: str):
"""
manage the relationships creation
Args:
client: Client object with request
entity_a: str the first entity of the relationship
entity_a_type: str the type of the first entity
indicator_type: str the indicator type to get the related information by
entity_b_type: str the indicator to get the related information by
:returns:
a list of the relationships that were created
"""
relationships: list = []
if client.max_indicator_relationships != 0:
params = {'limit': str(client.max_indicator_relationships)}
_, _, urls_raw_response = alienvault_get_related_urls_by_indicator_command(client, indicator_type, indicator, params)
urls_raw_response = delete_duplicated_relationships(urls_raw_response.get('url_list', []), 'url')
relationships += create_relationships(client, urls_raw_response, entity_a, entity_a_type, 'url', FeedIndicatorType.URL)
_, _, hash_raw_response = alienvault_get_related_hashes_by_indicator_command(client, indicator_type, indicator, params)
hash_raw_response = delete_duplicated_relationships(dict_safe_get(hash_raw_response, ['data'], ['']), 'hash')
relationships += create_relationships(client, hash_raw_response, entity_a, entity_a_type, 'hash', FeedIndicatorType.File)
_, _, passive_dns_raw_response = alienvault_get_passive_dns_data_by_indicator_command(client, indicator_type,
indicator, params)
if len(dict_safe_get(passive_dns_raw_response, ['passive_dns'], [''])) > client.max_indicator_relationships:
passive_dns_raw_response = delete_duplicated_relationships(passive_dns_raw_response.get('passive_dns')
[0:client.max_indicator_relationships],
field_for_passive_dns_rs)
else:
passive_dns_raw_response = delete_duplicated_relationships(dict_safe_get(passive_dns_raw_response, ['passive_dns'],
['']), field_for_passive_dns_rs)
passive_dns_raw_response = validate_string_is_not_url(passive_dns_raw_response, field_for_passive_dns_rs)
relationships += create_relationships(client, passive_dns_raw_response, entity_a,
entity_a_type, field_for_passive_dns_rs, feed_indicator_type_for_passive_dns_rs)
return relationships
|
23,006 | def test_read_csv_skiprows_range():
with filetext(csv_text) as fn:
f = dd.read_csv(fn, skiprows=range(5))
result = f.compute(scheduler='sync')
expected = pd.read_csv(fn, skiprows=range(5))
assert_eq(result, expected)
| def test_read_csv_skiprows_range():
with filetext(csv_text) as fn:
f = dd.read_csv(fn, skiprows=range(5))
result = f
expected = pd.read_csv(fn, skiprows=range(5))
assert_eq(result, expected)
|
34,875 | def keras_op_to_relay(inexpr, keras_layer, outname, etab):
"""Convert keras layer to relay expr, and update etab.
Parameters
----------
inexpr : relay.expr.Expr or a list of it
The input relay expr(s)
keras_layer : keras.layers
The keras layer to be converted
outname : str
Name of the output relay expr
etab : relay.frontend.common.ExprTable
The global expr table to be updated
"""
if type(keras_layer).__name__ not in _convert_map:
raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__)))
outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab)
outs = _as_list(outs)
for t_idx, out in enumerate(outs):
name = outname + ":" + str(t_idx)
etab.set_expr(name, out)
| def keras_op_to_relay(inexpr, keras_layer, outname, etab):
"""Convert keras layer to relay expr, and update etab.
Parameters
----------
inexpr : relay.expr.Expr or a list of it
The input Relay expression(s).
keras_layer : keras.layers
The keras layer to be converted
outname : str
Name of the output relay expr
etab : relay.frontend.common.ExprTable
The global expr table to be updated
"""
if type(keras_layer).__name__ not in _convert_map:
raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__)))
outs = _convert_map[type(keras_layer).__name__](inexpr, keras_layer, etab)
outs = _as_list(outs)
for t_idx, out in enumerate(outs):
name = outname + ":" + str(t_idx)
etab.set_expr(name, out)
|
38,971 | def field_singleton_schema( # noqa: C901 (ignore complexity)
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
This function is indirectly used by ``field_schema()``, you should probably be using that function.
Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models.
"""
from .main import BaseModel # noqa: F811
ref_prefix = ref_prefix or default_prefix
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
if field.sub_fields:
return field_singleton_sub_fields_schema(
field.sub_fields,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models,
)
if field.type_ is Any or field.type_.__class__ == TypeVar:
return {}, definitions, nested_models # no restrictions
if is_callable_type(field.type_):
raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
f_schema: Dict[str, Any] = {}
if field.field_info is not None and field.field_info.const:
f_schema['const'] = field.default
field_type = field.type_
if is_literal_type(field_type):
values = literal_values(field_type)
if len(values) > 1:
return field_schema(
multivalue_literal_field_for_schema(values, field),
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
literal_value = values[0]
field_type = literal_value.__class__
f_schema['const'] = literal_value
if isinstance(field_type, EnumMeta):
model_name = normalize_model_name(field_type.__name__)
f_schema = {'$ref': ref_prefix + model_name}
definitions[model_name] = enum_process_schema(field_type)
else:
add_field_type_to_schema(field_type, f_schema)
modify_schema = getattr(field_type, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
if f_schema:
return f_schema, definitions, nested_models
# Handle dataclass-based models
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):
field_type = field_type.__pydantic_model__
if issubclass(field_type, BaseModel):
model_name = model_name_map[field_type]
if field_type not in known_models:
sub_schema, sub_definitions, sub_nested_models = model_process_schema(
field_type,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
definitions.update(sub_definitions)
definitions[model_name] = sub_schema
nested_models.update(sub_nested_models)
else:
nested_models.add(model_name)
schema_ref = {'$ref': ref_prefix + model_name}
if not schema_overrides:
return schema_ref, definitions, nested_models
else:
return {'allOf': [schema_ref]}, definitions, nested_models
raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
| def field_singleton_schema( # noqa: C901 (ignore complexity)
field: ModelField,
*,
by_alias: bool,
model_name_map: Dict[Type['BaseModel'], str],
schema_overrides: bool = False,
ref_prefix: Optional[str] = None,
known_models: Set[Type['BaseModel']],
) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
"""
This function is indirectly used by ``field_schema()``, you should probably be using that function.
Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models.
"""
from .main import BaseModel # noqa: F811
ref_prefix = ref_prefix or default_prefix
definitions: Dict[str, Any] = {}
nested_models: Set[str] = set()
if field.sub_fields:
return field_singleton_sub_fields_schema(
field.sub_fields,
by_alias=by_alias,
model_name_map=model_name_map,
schema_overrides=schema_overrides,
ref_prefix=ref_prefix,
known_models=known_models,
)
if field.type_ is Any or field.type_.__class__ == TypeVar:
return {}, definitions, nested_models # no restrictions
if is_callable_type(field.type_):
raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.')
f_schema: Dict[str, Any] = {}
if field.field_info is not None and field.field_info.const:
f_schema['const'] = field.default
field_type = field.type_
if is_literal_type(field_type):
values = literal_values(field_type)
if len(values) > 1:
return field_schema(
multivalue_literal_field_for_schema(values, field),
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
literal_value = values[0]
field_type = literal_value.__class__
f_schema['const'] = literal_value
if isinstance(field_type, EnumMeta):
enum_name = normalize_model_name(field_type.__name__)
f_schema = {'$ref': ref_prefix + enum_name}
definitions[enum_name] = enum_process_schema(field_type)
else:
add_field_type_to_schema(field_type, f_schema)
modify_schema = getattr(field_type, '__modify_schema__', None)
if modify_schema:
modify_schema(f_schema)
if f_schema:
return f_schema, definitions, nested_models
# Handle dataclass-based models
if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):
field_type = field_type.__pydantic_model__
if issubclass(field_type, BaseModel):
model_name = model_name_map[field_type]
if field_type not in known_models:
sub_schema, sub_definitions, sub_nested_models = model_process_schema(
field_type,
by_alias=by_alias,
model_name_map=model_name_map,
ref_prefix=ref_prefix,
known_models=known_models,
)
definitions.update(sub_definitions)
definitions[model_name] = sub_schema
nested_models.update(sub_nested_models)
else:
nested_models.add(model_name)
schema_ref = {'$ref': ref_prefix + model_name}
if not schema_overrides:
return schema_ref, definitions, nested_models
else:
return {'allOf': [schema_ref]}, definitions, nested_models
raise ValueError(f'Value not declarable with JSON Schema, field: {field}')
|
3,543 | def delete_versions_from_db(project, version_data):
"""
Delete all versions not in the current repo.
:returns: The slug of the deleted versions from the database,
and the slug of active versions that where deleted from the repository.
"""
# We use verbose_name for tags
# because several tags can point to the same identifier.
versions_tags = [
version['verbose_name'] for version in version_data.get('tags', [])
]
versions_branches = [
version['identifier'] for version in version_data.get('branches', [])
]
to_delete_qs = (
project.versions
.exclude(uploaded=True)
.exclude(slug__in=NON_REPOSITORY_VERSIONS)
)
to_delete_qs = to_delete_qs.exclude(
type=TAG,
verbose_name__in=versions_tags,
)
to_delete_qs = to_delete_qs.exclude(
type=BRANCH,
identifier__in=versions_branches,
)
deleted_active_versions = set(
to_delete_qs.filter(active=True).values_list('slug', flat=True)
)
to_delete_qs = to_delete_qs.exclude(active=True)
deleted_versions = set(to_delete_qs.values_list('slug', flat=True))
if deleted_versions:
log.info(
'(Sync Versions) Deleted Versions: project=%s, versions=[%s]',
project.slug, ' '.join(deleted_versions),
)
to_delete_qs.delete()
return deleted_versions, deleted_active_versions
| def delete_versions_from_db(project, version_data):
"""
Delete all versions not in the current repo.
:returns: The slug of the deleted versions from the database,
and the slug of active versions that were deleted from the repository.
"""
# We use verbose_name for tags
# because several tags can point to the same identifier.
versions_tags = [
version['verbose_name'] for version in version_data.get('tags', [])
]
versions_branches = [
version['identifier'] for version in version_data.get('branches', [])
]
to_delete_qs = (
project.versions
.exclude(uploaded=True)
.exclude(slug__in=NON_REPOSITORY_VERSIONS)
)
to_delete_qs = to_delete_qs.exclude(
type=TAG,
verbose_name__in=versions_tags,
)
to_delete_qs = to_delete_qs.exclude(
type=BRANCH,
identifier__in=versions_branches,
)
deleted_active_versions = set(
to_delete_qs.filter(active=True).values_list('slug', flat=True)
)
to_delete_qs = to_delete_qs.exclude(active=True)
deleted_versions = set(to_delete_qs.values_list('slug', flat=True))
if deleted_versions:
log.info(
'(Sync Versions) Deleted Versions: project=%s, versions=[%s]',
project.slug, ' '.join(deleted_versions),
)
to_delete_qs.delete()
return deleted_versions, deleted_active_versions
|
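Outside of Django's ORM, the exclusion logic of delete_versions_from_db can be sketched with plain sets; this only illustrates the rule of deleting anything not present in the repo, not the actual queryset behaviour, and the sample data is made up:

# In-memory stand-in for the queryset exclusions above.
db_versions = {
    ('tag', 'v1.0'), ('tag', 'v1.1'),
    ('branch', 'main'), ('branch', 'old-feature'),
}
repo_tags = {'v1.1'}
repo_branches = {'main'}

to_delete = {
    (kind, name)
    for kind, name in db_versions
    if not (kind == 'tag' and name in repo_tags)
    and not (kind == 'branch' and name in repo_branches)
}
print(sorted(to_delete))  # [('branch', 'old-feature'), ('tag', 'v1.0')]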
57,817 | def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
| def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if is_error(res):
return_error(f'Error occurred while trying to get XDR endpoints: {get_error(res)}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
|
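The counting loop in both versions of main() can be written more compactly with a generator expression; a small self-contained sketch with made-up endpoint records:

endpoints = [
    {'endpoint_id': 'a', 'endpoint_status': 'CONNECTED'},
    {'endpoint_id': 'b', 'endpoint_status': 'DISCONNECTED'},
    {'endpoint_id': 'c', 'endpoint_status': 'CONNECTED'},
]
# Count endpoints reported as CONNECTED, tolerating missing keys.
connected_endpoints = sum(1 for e in endpoints if e.get('endpoint_status') == 'CONNECTED')
print(connected_endpoints)  # 2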
12,767 | def _preprocessed_interpreter_search_paths(
env_tgt: EnvironmentTarget,
_search_paths: Iterable[str],
is_default: bool,
) -> tuple[str, ...]:
"""Checks for special search path strings, and errors if any are invalid for the environment.
This will return:
* The search paths, unaltered, for local/undefined environments, OR
* The search paths, with invalid tokens removed, if the provided value was unaltered from the
default value in the options system
(see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`)
* The search paths unaltered, if the search paths do not contain tokens invalid for this
environment
If the environment is non-local and there are invalid tokens for those environments, raise
`ValueError`.
"""
env = env_tgt.val
search_paths = tuple(_search_paths)
if isinstance(env, LocalEnvironmentTarget):
return search_paths
if env is None:
return search_paths
not_allowed = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}
if is_default:
# Strip out the not-allowed special strings from search_paths.
# An error will occur on the off chance the non-local environment expects pyenv
# but there's nothing we can do here to detect it.
return tuple(path for path in search_paths if path not in not_allowed)
any_not_allowed = set(search_paths) & not_allowed
if any_not_allowed:
env_type = type(env)
raise ValueError(
f"`[python-bootstrap].search_paths` is configured to use local Python discovery "
f"tools, which do not work in {env_type.__name__} runtime environments. To fix this, "
f"set the value of `python_bootstrap_search_path` in the {env.alias} defined at "
f"`{env.address}` to contain only hardcoded paths or the `<PATH>` special string."
)
return search_paths
| def _preprocessed_interpreter_search_paths(
env_tgt: EnvironmentTarget,
_search_paths: Iterable[str],
is_default: bool,
) -> tuple[str, ...]:
"""Checks for special search path strings, and errors if any are invalid for the environment.
This will return:
* The search paths, unaltered, for local/undefined environments, OR
* The search paths, with invalid tokens removed, if the provided value was unaltered from the
default value in the options system
(see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`)
* The search paths unaltered, if the search paths do not contain tokens invalid for this
environment
If the environment is non-local and there are invalid tokens for those environments, raise
`ValueError`.
"""
env = env_tgt.val
search_paths = tuple(_search_paths)
if isinstance(env, LocalEnvironmentTarget):
return search_paths
if env is None:
return search_paths
not_allowed = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}
if is_default:
# Strip out the not-allowed special strings from search_paths.
# An error will occur on the off chance the non-local environment expects pyenv
# but there's nothing we can do here to detect it.
return tuple(path for path in search_paths if path not in not_allowed)
any_not_allowed = set(search_paths) & not_allowed
if any_not_allowed:
env_type = type(env)
raise ValueError(
f"`[python-bootstrap].search_paths` is configured to use local Python discovery "
f"tools, which do not work in {env_type.__name__} runtime environments. To fix this, "
f"set the value of `python_bootstrap_search_path` in the {env.alias} defined at "
f"`{env.address}`) to contain only hardcoded paths or the `<PATH>` special string."
)
return search_paths
|
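The is_default branch above simply drops the special discovery tokens; a self-contained sketch of that filtering step, reusing the same token set (the function name is hypothetical):

NOT_ALLOWED = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}

def strip_local_discovery_tokens(search_paths):
    # Keep only entries that remain valid in a non-local environment.
    return tuple(p for p in search_paths if p not in NOT_ALLOWED)

print(strip_local_discovery_tokens(("<PYENV>", "<PATH>", "/usr/bin")))
# ('<PATH>', '/usr/bin')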
5,252 | def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL, binary_chunk_size=100 * 1024):
"""Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
Parameters
----------
fname : str
The file path to the saved word2vec-format file.
fvocab : str, optional
File path to the vocabulary. Word counts are read from `fvocab` filename, if set
(this is the file generated by `-save-vocab` flag of the original C tool).
binary : bool, optional
If True, indicates whether the data is in binary word2vec format.
encoding : str, optional
If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
unicode_errors : str, optional
default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
limit : int, optional
Sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
datatype : type, optional
(Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
Such types may result in much slower bulk operations or incompatibility with optimized routines.
binary_chunk_size : int, optional
Size of chunk in which binary files are read. Used mostly for testing. Default value 100 kB.
Returns
-------
object
Returns the loaded model as an instance of :class:`cls`.
"""
def __add_word_to_result(result, counts, word, weights):
word_id = len(result.vocab)
if word in result.vocab:
logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
return
if counts is None:
# most common scenario: no vocab file given. just make up some bogus counts, in descending order
result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to None (TODO: or raise?)
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.vectors[word_id] = weights
result.index2word.append(word)
def __remove_initial_new_line(s):
i = 0
while i < len(s) and s[i] == '\n':
i += 1
return s[i:]
def __add_words_from_binary_chunk_to_result(result, counts, max_words, chunk, vector_size, datatype):
start = 0
n = len(chunk)
processed_words = 0
n_bytes_per_vector = vector_size * dtype(REAL).itemsize
for _ in range(0, max_words):
i_space = chunk.find(b' ', start)
i_vector = i_space + 1
if i_space != -1 and (n - i_vector) >= n_bytes_per_vector:
word = chunk[start:i_space].decode("utf-8", errors=unicode_errors)
# Some binary files are reported to have obsolete new line in the beginning of word, remove it
word = __remove_initial_new_line(word)
vector = frombuffer(chunk, offset=i_vector, count=vector_size, dtype=REAL).astype(datatype)
__add_word_to_result(result, counts, word, vector)
start = i_vector + n_bytes_per_vector
processed_words += 1
else:
break
return processed_words, chunk[start:]
from gensim.models.keyedvectors import Vocab
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.open(fvocab, 'rb') as fin:
for line in fin:
word, count = utils.to_unicode(line, errors=unicode_errors).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.open(fname, 'rb') as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
if limit:
vocab_size = min(vocab_size, limit)
result = cls(vector_size)
result.vector_size = vector_size
result.vectors = zeros((vocab_size, vector_size), dtype=datatype)
if binary:
chunk = b''
tot_processed_words = 0
while tot_processed_words < vocab_size:
new_chunk = fin.read(binary_chunk_size)
chunk += new_chunk
max_words = vocab_size - len(result.vocab)
processed_words, chunk = __add_words_from_binary_chunk_to_result(result, counts, max_words,
chunk, vector_size, datatype)
tot_processed_words += processed_words
if len(new_chunk) < binary_chunk_size:
break
if tot_processed_words != vocab_size:
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
else:
for line_no in range(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
word, weights = parts[0], [datatype(x) for x in parts[1:]]
__add_word_to_result(result, counts, word, weights)
if result.vectors.shape[0] != len(result.vocab):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
result.vectors.shape[0], len(result.vocab)
)
result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.vectors.shape
logger.info("loaded %s matrix from %s", result.vectors.shape, fname)
return result
| def _load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
limit=None, datatype=REAL, binary_chunk_size=100 * 1024):
"""Load the input-hidden weight matrix from the original C word2vec-tool format.
Note that the information stored in the file is incomplete (the binary tree is missing),
so while you can query for word similarity etc., you cannot continue training
with a model loaded this way.
Parameters
----------
fname : str
The file path to the saved word2vec-format file.
fvocab : str, optional
File path to the vocabulary. Word counts are read from `fvocab` filename, if set
(this is the file generated by `-save-vocab` flag of the original C tool).
binary : bool, optional
If True, indicates whether the data is in binary word2vec format.
encoding : str, optional
If you trained the C model using non-utf8 encoding for words, specify that encoding in `encoding`.
unicode_errors : str, optional
default 'strict', is a string suitable to be passed as the `errors`
argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
file may include word tokens truncated in the middle of a multibyte unicode character
(as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
limit : int, optional
Sets a maximum number of word-vectors to read from the file. The default,
None, means read all.
datatype : type, optional
(Experimental) Can coerce dimensions to a non-default float type (such as `np.float16`) to save memory.
Such types may result in much slower bulk operations or incompatibility with optimized routines.
binary_chunk_size : int, optional
Read input file in chunks of this many bytes, for performance reasons.
Returns
-------
object
Returns the loaded model as an instance of :class:`cls`.
"""
def __add_word_to_result(result, counts, word, weights):
word_id = len(result.vocab)
if word in result.vocab:
logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
return
if counts is None:
# most common scenario: no vocab file given. just make up some bogus counts, in descending order
result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
elif word in counts:
# use count from the vocab file
result.vocab[word] = Vocab(index=word_id, count=counts[word])
else:
# vocab file given, but word is missing -- set count to None (TODO: or raise?)
logger.warning("vocabulary file is incomplete: '%s' is missing", word)
result.vocab[word] = Vocab(index=word_id, count=None)
result.vectors[word_id] = weights
result.index2word.append(word)
def __remove_initial_new_line(s):
i = 0
while i < len(s) and s[i] == '\n':
i += 1
return s[i:]
def __add_words_from_binary_chunk_to_result(result, counts, max_words, chunk, vector_size, datatype):
start = 0
n = len(chunk)
processed_words = 0
n_bytes_per_vector = vector_size * dtype(REAL).itemsize
for _ in range(0, max_words):
i_space = chunk.find(b' ', start)
i_vector = i_space + 1
if i_space != -1 and (n - i_vector) >= n_bytes_per_vector:
word = chunk[start:i_space].decode("utf-8", errors=unicode_errors)
# Some binary files are reported to have obsolete new line in the beginning of word, remove it
word = __remove_initial_new_line(word)
vector = frombuffer(chunk, offset=i_vector, count=vector_size, dtype=REAL).astype(datatype)
__add_word_to_result(result, counts, word, vector)
start = i_vector + n_bytes_per_vector
processed_words += 1
else:
break
return processed_words, chunk[start:]
from gensim.models.keyedvectors import Vocab
counts = None
if fvocab is not None:
logger.info("loading word counts from %s", fvocab)
counts = {}
with utils.open(fvocab, 'rb') as fin:
for line in fin:
word, count = utils.to_unicode(line, errors=unicode_errors).strip().split()
counts[word] = int(count)
logger.info("loading projection weights from %s", fname)
with utils.open(fname, 'rb') as fin:
header = utils.to_unicode(fin.readline(), encoding=encoding)
vocab_size, vector_size = (int(x) for x in header.split()) # throws for invalid file format
if limit:
vocab_size = min(vocab_size, limit)
result = cls(vector_size)
result.vector_size = vector_size
result.vectors = zeros((vocab_size, vector_size), dtype=datatype)
if binary:
chunk = b''
tot_processed_words = 0
while tot_processed_words < vocab_size:
new_chunk = fin.read(binary_chunk_size)
chunk += new_chunk
max_words = vocab_size - len(result.vocab)
processed_words, chunk = __add_words_from_binary_chunk_to_result(result, counts, max_words,
chunk, vector_size, datatype)
tot_processed_words += processed_words
if len(new_chunk) < binary_chunk_size:
break
if tot_processed_words != vocab_size:
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
else:
for line_no in range(vocab_size):
line = fin.readline()
if line == b'':
raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
if len(parts) != vector_size + 1:
raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
word, weights = parts[0], [datatype(x) for x in parts[1:]]
__add_word_to_result(result, counts, word, weights)
if result.vectors.shape[0] != len(result.vocab):
logger.info(
"duplicate words detected, shrinking matrix size from %i to %i",
result.vectors.shape[0], len(result.vocab)
)
result.vectors = ascontiguousarray(result.vectors[: len(result.vocab)])
assert (len(result.vocab), vector_size) == result.vectors.shape
logger.info("loaded %s matrix from %s", result.vectors.shape, fname)
return result
|
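For the non-binary branch, the text format is just a header line "<vocab_size> <vector_size>" followed by one "word v1 v2 ..." line per entry. A tiny parser for that text format, independent of gensim and assuming well-formed input:

import io
import numpy as np

def load_text_word2vec(fileobj):
    # Parse the plain-text word2vec format: header, then one vector per line.
    vocab_size, vector_size = map(int, fileobj.readline().split())
    vectors = np.zeros((vocab_size, vector_size), dtype=np.float32)
    words = []
    for i in range(vocab_size):
        parts = fileobj.readline().rstrip().split(' ')
        if len(parts) != vector_size + 1:
            raise ValueError(f'invalid vector on line {i}')
        words.append(parts[0])
        vectors[i] = [float(x) for x in parts[1:]]
    return words, vectors

sample = io.StringIO('2 3\nking 0.1 0.2 0.3\nqueen 0.4 0.5 0.6\n')
words, vecs = load_text_word2vec(sample)
print(words, vecs.shape)  # ['king', 'queen'] (2, 3)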
26,112 | def test_view_change_not_happen_if_ic_is_discarded(looper, txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
tconf, tdir, allPluginsPath):
"""
1. panic_node (Delta) sends InstanceChange for all nodes.
2. Restart nodes_to_restart (Beta, Gamma).
3. Wait OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL sec.
4. nodes_to_restart send InstanceChanges for all nodes.
5. Ensure elections done.
"""
nodes_to_restart = txnPoolNodeSet[1:3]
panic_node = txnPoolNodeSet[-1]
view_no = txnPoolNodeSet[0].viewNo
panic_node.view_changer.on_master_degradation()
for n in nodes_to_restart:
_restart_node(looper, txnPoolNodeSet, n, tconf, tdir, allPluginsPath)
nodes_to_restart = txnPoolNodeSet[1:3]
# waiting to discard InstanceChange
looper.runFor(tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL)
for n in nodes_to_restart:
n.view_changer.on_master_degradation()
def check():
assert all(panic_node.view_changer.instanceChanges.has_inst_chng_from(view_no + 1, node.name)
for node in nodes_to_restart)
assert not panic_node.view_changer.instanceChanges.has_inst_chng_from(view_no + 1, panic_node.name)
looper.run(eventually(check))
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
for node in txnPoolNodeSet:
assert node.viewNo == view_no
| def test_view_change_not_happen_if_ic_is_discarded(looper, txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client,
tconf, tdir, allPluginsPath):
"""
1. panic_node (Delta) sends InstanceChange for all nodes.
2. Restart nodes_to_restart (Beta, Gamma).
3. Wait OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL sec.
4. nodes_to_restart send InstanceChanges for all nodes.
5. Ensure elections done.
"""
nodes_to_restart = txnPoolNodeSet[1:3]
panic_node = txnPoolNodeSet[-1]
view_no = txnPoolNodeSet[0].viewNo
panic_node.view_changer.on_master_degradation()
for n in nodes_to_restart:
_restart_node(looper, txnPoolNodeSet, n, tconf, tdir, allPluginsPath)
nodes_to_restart = txnPoolNodeSet[1:3]
# waiting to discard InstanceChange
looper.runFor(tconf.OUTDATED_INSTANCE_CHANGES_CHECK_INTERVAL + 1)
for n in nodes_to_restart:
n.view_changer.on_master_degradation()
def check():
assert all(panic_node.view_changer.instanceChanges.has_inst_chng_from(view_no + 1, node.name)
for node in nodes_to_restart)
assert not panic_node.view_changer.instanceChanges.has_inst_chng_from(view_no + 1, panic_node.name)
looper.run(eventually(check))
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
for node in txnPoolNodeSet:
assert node.viewNo == view_no
|
4,224 | def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1):
"""Detect segments with muscle artifacts.
Detects segments that contain high frequency activity beyond the
specified threshold. Muscle artifacts are most notable in the range of 110-
140Hz.
Raw data is band pass filtered between 110 and 140 Hz, the signal envelope
computed, z-scored across samples, channel averaged and low-pass
filtered to smooth transient peaks.
Parameters
----------
raw : instance of Raw
Data to compute head position.
threshold : float
The threshod for selecting segments with muscle activity artifacts.
picks : array
Channels to use for artifact detection.
min_length_good : int | float | None
The minimal good segment length between annotations, smaller segments
will be included in the movement annotation.
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts.
scores_muscle : array
Z-score values averaged across channels for each sample.
"""
raw_copy = raw.copy()
raw_copy.pick(picks)
raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case
# Only one type of channel, otherwise z-score will be biased
assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \
'types, pick one type'
raw_copy.filter(110, 140, fir_design='firwin')
raw_copy.apply_hilbert(envelope=True)
sfreq = raw_copy.info['sfreq']
art_scores = zscore(raw_copy._data, axis=1)
scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4)
art_mask = scores_muscle > threshold
# remove artifact free periods shorter than min_length_good
idx_min = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for l in range(1, num_comps + 1):
l_idx = np.nonzero(comps == l)[0]
if len(l_idx) < idx_min:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle')
return annot, scores_muscle
| def annotate_muscle(raw, threshold=1.5, picks=None, min_length_good=.1):
"""Detect segments with muscle artifacts.
Detects segments that contain high frequency activity beyond the
specified threshold. Muscle artifacts are most notable in the range of 110-
140Hz.
Raw data is band pass filtered between 110 and 140 Hz, the signal envelope
computed, z-scored across samples, channel averaged and low-pass
filtered to smooth transient peaks.
Parameters
----------
raw : instance of Raw
Data to compute head position.
threshold : float
The threshold for selecting segments with muscle activity artifacts.
picks : array
Channels to use for artifact detection.
min_length_good : int | float | None
The minimal good segment length between annotations, smaller segments
will be included in the movement annotation.
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts.
scores_muscle : array
Z-score values averaged across channels for each sample.
"""
raw_copy = raw.copy()
raw_copy.pick(picks)
raw_copy.pick_types(ref_meg=False) # Remove ref chans just in case
# Only one type of channel, otherwise z-score will be biased
assert(len(set(raw_copy.get_channel_types())) == 1), 'Different channel ' \
'types, pick one type'
raw_copy.filter(110, 140, fir_design='firwin')
raw_copy.apply_hilbert(envelope=True)
sfreq = raw_copy.info['sfreq']
art_scores = zscore(raw_copy._data, axis=1)
scores_muscle = filter_data(art_scores.mean(axis=0), sfreq, None, 4)
art_mask = scores_muscle > threshold
# remove artifact free periods shorter than min_length_good
idx_min = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for l in range(1, num_comps + 1):
l_idx = np.nonzero(comps == l)[0]
if len(l_idx) < idx_min:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle')
return annot, scores_muscle
|
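The core of annotate_muscle's detection is: z-score the band-passed envelope per channel, average across channels, and threshold. A minimal numpy/scipy sketch of that masking step on synthetic data (not the MNE pipeline itself; the toy signal and threshold are made up):

import numpy as np
from scipy.stats import zscore

rng = np.random.default_rng(0)
data = rng.normal(size=(5, 1000))           # 5 channels x 1000 samples
data[:, 400:420] += 5.0                     # inject a high-amplitude artifact

scores = zscore(data, axis=1).mean(axis=0)  # z-score per channel, then average across channels
art_mask = scores > 1.5                     # samples flagged as artifact
print('artifact window flagged:', art_mask[400:420].any())
print('fraction of samples flagged:', art_mask.mean())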
23,114 | def check_index(axis, ind, dimension):
"""Check validity of index for a given dimension
Examples
--------
>>> check_index(0, 3, 5)
>>> check_index(0, 5, 5)
Traceback (most recent call last):
...
IndexError: Index 5 is out of bounds for axis 0 with size 5
>>> check_index(1, 6, 5)
Traceback (most recent call last):
...
IndexError: Index 6 is out of bounds for axis 1 with size 5
>>> check_index(1, -1, 5)
>>> check_index(1, -6, 5)
Traceback (most recent call last):
...
IndexError: Index -6 is out of bounds for axis 1 with size 5
>>> check_index(0, [1, 2], 5)
>>> check_index(0, [6, 3], 5)
Traceback (most recent call last):
...
IndexError: Index is out of bounds for axis 0 with size 5
>>> check_index(1, slice(0, 3), 5)
>>> check_index(0, [True], 1)
>>> check_index(0, [True, True], 3)
Traceback (most recent call last):
...
IndexError: Boolean array with size 2 is not long enough for axis 0 with size 3
>>> check_index(0, [True, True, True], 1)
Traceback (most recent call last):
...
IndexError: Boolean array with size 3 is not long enough for axis 0 with size 1
"""
if isinstance(ind, list):
ind = np.asanyarray(ind)
# unknown dimension, assumed to be in bounds
if np.isnan(dimension):
return
elif is_dask_collection(ind):
return
elif is_arraylike(ind):
if ind.dtype == bool:
if ind.size != dimension:
raise IndexError(
f"Boolean array with size {ind.size} is not long enough"
f"for axis {axis} with size {dimension}"
)
elif (ind >= dimension).any() or (ind < -dimension).any():
raise IndexError(
f"Index is out of bounds for axis {axis} with size {dimension}"
)
elif isinstance(ind, slice):
return
elif ind is None:
return
elif ind >= dimension or ind < -dimension:
raise IndexError(
f"Index {ind} is out of bounds for axis {axis} with size {dimension}"
)
| def check_index(axis, ind, dimension):
"""Check validity of index for a given dimension
Examples
--------
>>> check_index(0, 3, 5)
>>> check_index(0, 5, 5)
Traceback (most recent call last):
...
IndexError: Index 5 is out of bounds for axis 0 with size 5
>>> check_index(1, 6, 5)
Traceback (most recent call last):
...
IndexError: Index 6 is out of bounds for axis 1 with size 5
>>> check_index(1, -1, 5)
>>> check_index(1, -6, 5)
Traceback (most recent call last):
...
IndexError: Index -6 is out of bounds for axis 1 with size 5
>>> check_index(0, [1, 2], 5)
>>> check_index(0, [6, 3], 5)
Traceback (most recent call last):
...
IndexError: Index is out of bounds for axis 0 with size 5
>>> check_index(1, slice(0, 3), 5)
>>> check_index(0, [True], 1)
>>> check_index(0, [True, True], 3)
Traceback (most recent call last):
...
IndexError: Boolean array with size 2 is not long enough for axis 0 with size 3
>>> check_index(0, [True, True, True], 1)
Traceback (most recent call last):
...
IndexError: Boolean array with size 3 is not long enough for axis 0 with size 1
"""
if isinstance(ind, list):
ind = np.asanyarray(ind)
# unknown dimension, assumed to be in bounds
if np.isnan(dimension):
return
elif is_dask_collection(ind):
return
elif is_arraylike(ind):
if ind.dtype == bool:
if ind.size != dimension:
raise IndexError(
f"Boolean array with size {ind.size} is not long enough "
f"for axis {axis} with size {dimension}"
)
elif (ind >= dimension).any() or (ind < -dimension).any():
raise IndexError(
f"Index is out of bounds for axis {axis} with size {dimension}"
)
elif isinstance(ind, slice):
return
elif ind is None:
return
elif ind >= dimension or ind < -dimension:
raise IndexError(
f"Index {ind} is out of bounds for axis {axis} with size {dimension}"
)
|
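The rules encoded by check_index's doctests reduce to: boolean masks must match the axis length exactly, while integer indices must lie in [-size, size). A compact standalone restatement of those two rules (not dask's implementation, and it returns a bool instead of raising; the helper name is hypothetical):

import numpy as np

def index_in_bounds(ind, size):
    # Hypothetical helper mirroring the bounds rules from check_index.
    ind = np.asarray(ind)
    if ind.dtype == bool:
        return ind.size == size                         # boolean mask must cover the axis
    return bool(((ind >= -size) & (ind < size)).all())  # every index within bounds

print(index_in_bounds([True, True, True], 3))  # True
print(index_in_bounds([1, -3, 2], 3))          # True
print(index_in_bounds([3], 3))                 # False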
5,444 | def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
"""
Install the named fileset(s)/rpm package(s).
.. versionadded:: 3005
preference for installing rpm packages is to use, in the following order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
.. note:
use of rpm to install implies that rpm's dependencies must have been previously installed.
dnf and yum automatically install rpm's dependencies as part of the install process
Algorithm to install filesets or rpms is as follows:
if ends with '.rte' or '.bff'
process as fileset
if ends with '.rpm'
process as rpm
if unrecognised or no file extension
attempt process with dnf | yum
failure implies attempt process as fileset
Fileset needs to be available as a single path and filename
compound filesets are not handled and are not supported
an example is bos.adt.insttools which is part of bos.adt.other and is installed as follows
/usr/bin/installp -acXYg /cecc/repos/aix72/TL4/BASE/installp/ppc/bos.adt.other bos.adt.insttools
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly.
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install libxml2
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug(f"Installing these fileset(s)/rpm package(s) '{name}': '{targets}'")
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
flag_fileset = False
flag_actual_rpm = False
flag_try_rpm_failed = False
cmd = ""
out = {}
if filename.endswith(".bff") or filename.endswith(".rte"):
flag_fileset = True
log.debug(f"install identified '{filename}' as fileset")
else:
if filename.endswith(".rpm"):
flag_actual_rpm = True
log.debug(f"install identified '{filename}' as rpm")
else:
log.debug(f"install, filename '{filename}' trying install as rpm")
# assume use dnf or yum
cmdflags = "install "
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
# check for old yum first, removed if new dnf or yum
cmdexe = "/usr/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-Uivh "
if test:
cmdflags += "--test"
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](cmd, python_shell=False)
log.debug(f"result of command '{cmd}', out '{out}'")
if "retcode" in out and not (0 == out["retcode"] or 100 == out["retcode"]):
if not flag_actual_rpm:
flag_try_rpm_failed = True
log.debug(
f"install tried filename '{filename}' as rpm and failed, trying as fileset"
)
else:
errors.append(out["stderr"])
log.debug(
f"install error rpm path, out '{out}', resultant errors '{errors}'"
)
if flag_fileset or flag_try_rpm_failed:
# either identified as fileset, or failed trying install as rpm, try as fileset
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
log.debug(f"install fileset command '{cmd}'")
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
log.debug(
f"install error fileset path, out '{out}', resultant errors '{errors}'"
)
# Get a list of the packages after the install
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
| def install(name=None, refresh=False, pkgs=None, version=None, test=False, **kwargs):
"""
Install the named fileset(s)/rpm package(s).
.. versionadded:: 3005
preference for installing rpm packages is to use, in the following order:
/opt/freeware/bin/dnf
/opt/freeware/bin/yum
/usr/bin/yum
/usr/bin/rpm
.. note:
use of rpm to install implies that rpm's dependencies must have been previously installed.
dnf and yum automatically install rpm's dependencies as part of the install process
Algorithm to install filesets or rpms is as follows:
if ends with '.rte' or '.bff'
process as fileset
if ends with '.rpm'
process as rpm
if unrecognised or no file extension
attempt process with dnf | yum
failure implies attempt process as fileset
Fileset needs to be available as a single path and filename
compound filesets are not handled and are not supported.
an example is bos.adt.insttools which is part of bos.adt.other and is installed as follows
/usr/bin/installp -acXYg /cecc/repos/aix72/TL4/BASE/installp/ppc/bos.adt.other bos.adt.insttools
name
The name of the fileset or rpm package to be installed.
refresh
Whether or not to update the yum database before executing.
pkgs
A list of filesets and/or rpm packages to install.
Must be passed as a python list. The ``name`` parameter will be
ignored if this option is passed.
version
Install a specific version of a fileset/rpm package.
(Unused at present).
test
Verify that command functions correctly.
Returns a dict containing the new fileset(s)/rpm package(s) names and versions:
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm
salt '*' pkg.install /stage/middleware/AIX/bash-4.2-3.aix6.1.ppc.rpm refresh=True
salt '*' pkg.install /stage/middleware/AIX/VIOS2211_update/tpc_4.1.1.85.bff
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.rte.printers_7.2.2.0.bff
salt '*' pkg.install /stage/middleware/AIX/Xlc/usr/sys/inst.images/xlC.rte
salt '*' pkg.install /stage/middleware/AIX/Firefox/ppc-AIX53/Firefox.base
salt '*' pkg.install /cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install libxml2
"""
targets = salt.utils.args.split_input(pkgs) if pkgs else [name]
if not targets:
return {}
if pkgs:
log.debug(f"Installing these fileset(s)/rpm package(s) '{name}': '{targets}'")
# Get a list of the currently installed pkgs.
old = list_pkgs()
# Install the fileset (normally ends with bff or rte) or rpm package(s)
errors = []
for target in targets:
filename = os.path.basename(target)
flag_fileset = False
flag_actual_rpm = False
flag_try_rpm_failed = False
cmd = ""
out = {}
if filename.endswith(".bff") or filename.endswith(".rte"):
flag_fileset = True
log.debug(f"install identified '{filename}' as fileset")
else:
if filename.endswith(".rpm"):
flag_actual_rpm = True
log.debug(f"install identified '{filename}' as rpm")
else:
log.debug(f"install, filename '{filename}' trying install as rpm")
# assume use dnf or yum
cmdflags = "install "
libpathenv = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
if pathlib.Path("/opt/freeware/bin/dnf").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/dnf"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/usr/bin/yum").is_file():
# check for old yum first, removed if new dnf or yum
cmdexe = "/usr/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
elif pathlib.Path("/opt/freeware/bin/yum").is_file():
cmdflags += "--allowerasing "
cmdexe = "/opt/freeware/bin/yum"
if test:
cmdflags += "--assumeno "
else:
cmdflags += "--assumeyes "
if refresh:
cmdflags += "--refresh "
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](
cmd,
python_shell=False,
env=libpathenv,
ignore_retcode=True,
)
else:
cmdexe = "/usr/bin/rpm"
cmdflags = "-Uivh "
if test:
cmdflags += "--test"
cmd = f"{cmdexe} {cmdflags} {target}"
out = __salt__["cmd.run_all"](cmd, python_shell=False)
log.debug(f"result of command '{cmd}', out '{out}'")
if "retcode" in out and not (0 == out["retcode"] or 100 == out["retcode"]):
if not flag_actual_rpm:
flag_try_rpm_failed = True
log.debug(
f"install tried filename '{filename}' as rpm and failed, trying as fileset"
)
else:
errors.append(out["stderr"])
log.debug(
f"install error rpm path, out '{out}', resultant errors '{errors}'"
)
if flag_fileset or flag_try_rpm_failed:
# either identified as fileset, or failed trying install as rpm, try as fileset
cmd = "/usr/sbin/installp -acYXg"
if test:
cmd += "p"
cmd += " -d "
dirpath = os.path.dirname(target)
cmd += dirpath + " " + filename
log.debug(f"install fileset command '{cmd}'")
out = __salt__["cmd.run_all"](cmd, python_shell=False)
if 0 != out["retcode"]:
errors.append(out["stderr"])
log.debug(
f"install error fileset path, out '{out}', resultant errors '{errors}'"
)
# Get a list of the packages after the install
__context__.pop("pkg.list_pkgs", None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
"Problems encountered installing filesets(s)/package(s)",
info={"changes": ret, "errors": errors},
)
# No error occurred
if test:
return "Test succeeded."
return ret
|
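Both versions of install() dispatch on the file extension before falling back to dnf/yum/rpm; a small sketch of just that classification step (the helper name classify_target is hypothetical):

import os

def classify_target(target):
    # Mirror the extension-based dispatch: fileset, rpm, or 'try a package manager'.
    filename = os.path.basename(target)
    if filename.endswith(('.bff', '.rte')):
        return 'fileset'
    if filename.endswith('.rpm'):
        return 'rpm'
    return 'package-manager'

for t in ('/repo/bos.rte.printers_7.2.2.0.bff', '/repo/bash-4.2-3.aix6.1.ppc.rpm', 'libxml2'):
    print(t, '->', classify_target(t))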
24,881 | def _loop_exits_early(loop):
"""
Returns true if a loop may end up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop mays end up in a break statement, False otherwise.
"""
loop_nodes = (nodes.For, nodes.While)
definition_nodes = (nodes.FunctionDef, nodes.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
| def _loop_exits_early(loop):
"""
Returns true if a loop may end up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (nodes.For, nodes.While)
definition_nodes = (nodes.FunctionDef, nodes.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(nodes.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
|
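A rough stdlib ast analogue of _loop_exits_early, checking whether a break belongs to the loop itself rather than to a nested loop (a simplification for illustration, not the astroid-based implementation above):

import ast

def loop_exits_early(loop):
    # True if the loop body contains a `break` that is not inside a nested loop.
    class BreakFinder(ast.NodeVisitor):
        def __init__(self):
            self.found = False
        def visit_For(self, node):    # do not descend into nested for-loops
            pass
        def visit_While(self, node):  # do not descend into nested while-loops
            pass
        def visit_Break(self, node):
            self.found = True
    finder = BreakFinder()
    for child in loop.body:
        finder.visit(child)
    return finder.found

tree = ast.parse('for x in xs:\n    if x:\n        break\n')
print(loop_exits_early(tree.body[0]))  # True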
13,564 | def QR_iteration(H, shifts):
"""Perform the QR iteration.
Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
peformed in order to avoid complex arithmetic.
Parameters
----------
H
The |NumPy array| H which is an unreduced upper Hessenberg matrix.
shifts
A |NumPy array| which contains the shifts that are to be applied in the QR steps.
Returns
-------
Hs
A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`.
Qs
The product of the orthogonal matrices computed in each QR step.
"""
Qs = np.eye(len(H))
i = 0
while i < len(shifts) - 1:
s = shifts[i]
if shifts[i].imag != 0:
Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H)))
i = i + 2
else:
Q, R = np.linalg.qr(H - s * np.eye(len(H)))
i = i + 1
Qs = Qs @ Q
H = Q.T @ H @ Q
return H, Qs
| def QR_iteration(H, shifts):
"""Perform the QR iteration.
Performs a QR step for each shift provided in `shifts`. `H` is assumed to be an
unreduced upper Hessenberg matrix. If a complex shift occurs a double step is
peformed in order to avoid complex arithmetic.
Parameters
----------
H
The |NumPy array| H which is an unreduced upper Hessenberg matrix.
shifts
A |NumPy array| which contains the shifts that are to be applied in the QR steps.
Returns
-------
Hs
A |NumPy array| in upper Hessenberg form such that it holds :math:`H Q_s = Q_s H_s`.
Qs
The product of the orthogonal matrices computed in each QR step.
"""
Qs = np.eye(len(H))
i = 0
while i < len(shifts) - 1:
s = shifts[i]
if shifts[i].imag != 0:
Q, R = np.linalg.qr(H @ H - 2 * np.real(s) * H + np.abs(s)**2 * np.eye(len(H)))
i += 2
else:
Q, R = np.linalg.qr(H - s * np.eye(len(H)))
i = i + 1
Qs = Qs @ Q
H = Q.T @ H @ Q
return H, Qs
|
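A quick sanity check for QR_iteration is that each shifted QR step is a similarity transform, so the eigenvalues of H must be preserved. A small numpy/scipy check of one real-shift step on a random Hessenberg matrix (the shift value is arbitrary):

import numpy as np
from scipy.linalg import hessenberg

rng = np.random.default_rng(0)
H = hessenberg(rng.standard_normal((5, 5)))   # reduce a random matrix to upper Hessenberg form

s = 0.5                                       # one real shift
Q, R = np.linalg.qr(H - s * np.eye(len(H)))
H_next = Q.T @ H @ Q                          # one shifted QR step (similarity transform)

ev_before = np.sort_complex(np.linalg.eigvals(H))
ev_after = np.sort_complex(np.linalg.eigvals(H_next))
print(np.allclose(ev_before, ev_after))       # True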
28,583 | def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
colors: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False``, it defaults to a maximum of 5 samples and will set jitter to `0.7`
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bpv: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to
indicate the name of the county where the measurement was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
| def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to ``C0``.
colors: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False``, it defaults to a maximum of 5 samples and will set jitter to `0.7`
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib's backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib's backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib's backend, you may need to run twice the cell
with the animation.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False}) or changing the matplotlib's backend (e.g. to TkAgg)
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored with matplotlib backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to ``'posterior'``;
the other valid value is ``'prior'``.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bpv: Plot Bayesian p-value for observed data and posterior/prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` so that it
indicates the name of the county where the measurement was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
...             coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
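As noted in the ``animated`` parameter description above, running the animation from a script returns both the axes and the animation object. A minimal usage sketch (assuming the matplotlib backend and the same 'radon' example dataset used in the docstring; this snippet is an illustration, not part of the original function):
import arviz as az
import matplotlib.pyplot as plt

data = az.load_arviz_data('radon')
# keep a reference to the returned animation so it is not garbage-collected
ax, ani = az.plot_ppc(data, kind='kde', animated=True, num_pp_samples=30, random_seed=7)
plt.show()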
|
38,271 | def build_node_set(node, s=None):
"""Build a set of all the nodes in a rapidz graph
Parameters
----------
node : Stream
The node to use as a starting point for building the set
s : set or None
The set to put the nodes into. If None return a new set full of nodes
Returns
-------
s : set
The set of nodes in the graph
"""
if s is None:
s = set()
if node is None or (
node in s
and all(n in s for n in node.upstreams)
and all(n in s for n in node.downstreams)
):
return
s.add(node)
s.update(node.upstreams)
s.update({n for n in node.downstreams})
[build_node_set(n, s) for n in list(s)]
return s
| def build_node_set(node, s=None):
"""Build a set of all the nodes in a streamz graph
Parameters
----------
node : Stream
The node to use as a starting point for building the set
s : set or None
The set to put the nodes into. If None return a new set full of nodes
Returns
-------
s : set
The set of nodes in the graph
"""
if s is None:
s = set()
if node is None or (
node in s
and all(n in s for n in node.upstreams)
and all(n in s for n in node.downstreams)
):
return
s.add(node)
s.update(node.upstreams)
s.update({n for n in node.downstreams})
[build_node_set(n, s) for n in list(s)]
return s
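A minimal usage sketch of the function above (assuming the streamz package is installed; the node names are illustrative and not part of the original row):
from streamz import Stream

source = Stream()
doubled = source.map(lambda x: x * 2)   # downstream of `source`
printed = doubled.sink(print)           # downstream of `doubled`

# Starting from a mid-graph node still collects the whole graph, because the
# recursion walks both `upstreams` and `downstreams` until nothing new is added.
all_nodes = build_node_set(doubled)
assert {source, doubled, printed} <= all_nodes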
|
3,425 | def _symbolicate(profile: MutableMapping[str, Any], project: Project) -> MutableMapping[str, Any]:
symbolicator = Symbolicator(project=project, event_id=profile["profile_id"])
modules = profile["debug_meta"]["images"]
stacktraces = [
{
"registers": {},
"frames": s["frames"],
}
for s in profile["sampled_profile"]["samples"]
]
symbolication_start_time = time()
while True:
try:
response = symbolicator.process_payload(stacktraces=stacktraces, modules=modules)
# make sure we're getting the same number of stacktraces back
assert len(profile["sampled_profile"]["samples"]) == len(response["stacktraces"])
for i, (original, symbolicated) in enumerate(
zip(profile["sampled_profile"]["samples"], response["stacktraces"])
):
# make sure we're getting the same number of frames back
assert len(original["frames"]) == len(symbolicated["frames"])
for symbolicated_frame in symbolicated["frames"]:
original_frame = original["frames"][symbolicated_frame["original_index"]]
# preserve original values
new_frame = {}
for k, v in original_frame.items():
new_frame[f"_{k}"] = v
new_frame.update(symbolicated_frame)
profile["sampled_profile"]["samples"][i]["frames"][
symbolicated_frame["original_index"]
] = new_frame
break
except RetrySymbolication as e:
if (
time() - symbolication_start_time
) > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
break
else:
sleep_time = (
settings.SYMBOLICATOR_MAX_RETRY_AFTER
if e.retry_after is None
else min(e.retry_after, settings.SYMBOLICATOR_MAX_RETRY_AFTER)
)
sleep(sleep_time)
continue
except Exception:
break
# remove debug information we don't need anymore
profile.pop("debug_meta")
# rename the profile key to suggest it has been processed
profile["profile"] = profile.pop("sampled_profile")
return profile
| def _symbolicate(profile: MutableMapping[str, Any], project: Project) -> MutableMapping[str, Any]:
symbolicator = Symbolicator(project=project, event_id=profile["profile_id"])
modules = profile["debug_meta"]["images"]
stacktraces = [
{
"registers": {},
"frames": s["frames"],
}
for s in profile["sampled_profile"]["samples"]
]
symbolication_start_time = time()
while True:
try:
response = symbolicator.process_payload(stacktraces=stacktraces, modules=modules)
# make sure we're getting the same number of stacktraces back
assert len(profile["sampled_profile"]["samples"]) == len(response["stacktraces"])
for i, (original, symbolicated) in enumerate(
zip(profile["sampled_profile"]["samples"], response["stacktraces"])
):
# make sure we're getting the same number of frames back
assert len(original["frames"]) == len(symbolicated["frames"])
for symbolicated_frame in symbolicated["frames"]:
original_frame = original["frames"][symbolicated_frame["original_index"]]
# preserve original values
new_frame = {}
for k, v in original_frame.items():
new_frame[f"_{k}"] = v
new_frame.update(symbolicated_frame)
original["frames"][symbolicated_frame["original_index"]] = new_frame
break
except RetrySymbolication as e:
if (
time() - symbolication_start_time
) > settings.SYMBOLICATOR_PROCESS_EVENT_HARD_TIMEOUT:
break
else:
sleep_time = (
settings.SYMBOLICATOR_MAX_RETRY_AFTER
if e.retry_after is None
else min(e.retry_after, settings.SYMBOLICATOR_MAX_RETRY_AFTER)
)
sleep(sleep_time)
continue
except Exception:
break
# remove debug information we don't need anymore
profile.pop("debug_meta")
# rename the profile key to suggest it has been processed
profile["profile"] = profile.pop("sampled_profile")
return profile
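The while/try loop above follows a deadline-bounded retry pattern: re-submit until symbolication succeeds, the hard timeout elapses, or an unexpected error occurs. A stripped-down, self-contained sketch of the same pattern (the names below are hypothetical, not Sentry APIs):
from time import sleep, time

class RetryLater(Exception):
    """Stand-in for a 'please retry' signal such as RetrySymbolication."""
    def __init__(self, retry_after=None):
        super().__init__()
        self.retry_after = retry_after

def call_with_deadline(do_work, hard_timeout=60.0, max_backoff=5.0):
    """Retry do_work() until it returns, fails permanently, or the deadline passes."""
    start = time()
    while True:
        try:
            return do_work()
        except RetryLater as e:
            if time() - start > hard_timeout:
                return None                      # give up past the hard deadline
            sleep(min(e.retry_after or max_backoff, max_backoff))
        except Exception:
            return None                          # any other failure: stop retrying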
|
31,062 | def main():
try:
if demisto.command() == 'test-module':
# Tests connectivity and credentials on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'ironportQuarantineReleaseEmail':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearchSpam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearch':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
| def main():
try:
if demisto.command() == 'test-module':
# Tests connectivity and credentials on login
# generateStartEndDates(1)
return "ok"
elif demisto.command() == 'iron-port-quarantine-release-email':
mesId = demisto.args().get('mid')
ironportQuarantineReleaseEmail(mesId)
elif demisto.command() == 'ironportSpamReleaseEmail':
mesId = demisto.args().get('mid')
ironportSpamReleaseEmail(mesId)
elif demisto.command() == 'ironPortSearchQuarantines':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearchSpam':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit)
elif demisto.command() == 'ironPortSearch':
period = demisto.args().get('periodInDays')
# senderPattern=""
senderPattern = demisto.args().get('senderPattern')
recipientPattern = demisto.args().get('recipientPattern')
subjectPattern = demisto.args().get('subjectPattern')
limit = demisto.args().get('limit')
# print("senderPattern :",senderPattern)
ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit)
except Exception as e:
LOG.print_log(e)
#
|
42,781 | def test_dataframe_multiIndex_index():
"""Test for multiIndex dataframe"""
data = {
"x":
pd.DataFrame([[2, 3], [6, 7]],
index=pd.MultiIndex.from_arrays([['a', 'b'], ['y', 'z']]))
}
with pytest.raises(ValueError):
assert expand_grid(others=data)
| def test_dataframe_multi_index_index():
"""Test for multiIndex dataframe"""
data = {
"x":
pd.DataFrame([[2, 3], [6, 7]],
index=pd.MultiIndex.from_arrays([['a', 'b'], ['y', 'z']]))
}
with pytest.raises(ValueError):
assert expand_grid(others=data)
|
47,982 | def init_telemetry():
try:
import openvino_telemetry as tm # pylint:disable=C0415
except ImportError:
return None
try:
telemetry = tm.Telemetry('Accuracy Checker', version=__version__, tid='UA-194864834-1')
return telemetry
except Exception: # pylint:disable=W0703
return None
| def init_telemetry():
try:
import openvino_telemetry as tm # pylint:disable=C0415
except ImportError:
return None
try:
telemetry = tm.Telemetry('Accuracy Checker', app_version= __version__, tid='UA-194864834-1')
return telemetry
except Exception: # pylint:disable=W0703
return None
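Because both the missing-package and the construction-failure paths return None, callers are expected to guard every telemetry call. A hypothetical usage sketch (``send_event`` is assumed here to be part of the openvino_telemetry client API):
telemetry = init_telemetry()
if telemetry is not None:
    # report an event only when a telemetry client could actually be created
    telemetry.send_event('accuracy_checker', 'run', 'started')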
|
31,766 | def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-get-merge-request': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-create-issue': gitlab_create_issue_command,
'gitlab-edit-issue': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
| def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-get-merge-request': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-issue-create': gitlab_create_issue_command,
'gitlab-edit-issue': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
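The dict-based dispatch above keeps routing declarative: adding a command is one mapping entry plus a handler with the ``(client, args)`` signature. A hypothetical sketch (the command name, handler, and endpoint below are illustrative, not part of the integration):
def gitlab_project_get_command(client, args):
    # every handler receives the shared Client and the demisto args dict
    project_id = args.get('project_id')
    response = client._http_request('GET', f'projects/{project_id}')
    return CommandResults(outputs_prefix='GitLab.Project',
                          outputs_key_field='id',
                          outputs=response)

# inside main(), register it alongside the existing entries:
#     'gitlab-project-get': gitlab_project_get_command,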
|
27,941 | def main():
parser = argparse.ArgumentParser('Train a neural network on MNIST dataset')
parser.add_argument(
'--batchsize', '-B', type=int, default=100, help='Batch size')
parser.add_argument(
'--epoch', '-E', type=int, default=20,
help='Number of epochs to train')
parser.add_argument(
'--iteration', '-I', type=int, default=None,
help='Number of iterations to train. Epoch is ignored if specified.')
parser.add_argument(
'--data', '-p', default='mnist',
help='Path to the directory that contains MNIST dataset')
parser.add_argument(
'--device', '-d', default='native', help='Device to use')
parser.add_argument(
'--eval-size', default=None, type=int,
help='Number of samples to use from the test set for evaluation. '
'None to use all.')
args = parser.parse_args()
chx.set_default_device(args.device)
# Prepare dataset
X, Y = get_mnist(args.data, 'train')
X_test, Y_test = get_mnist(args.data, 't10k')
# Prepare model
model = MLP()
# Training
N = X.shape[0] # TODO(beam2d): implement len
# TODO(beam2d): support int32 indexing
all_indices_np = np.arange(N, dtype=np.int64)
batch_size = args.batchsize
eval_size = args.eval_size
# Train
model.require_grad()
it = 0
epoch = 0
is_finished = False
start = time.time()
while not is_finished:
# TODO(beam2d): not supported in chx
np.random.shuffle(all_indices_np)
all_indices = chx.array(all_indices_np)
for i in range(0, N, batch_size):
indices = all_indices[i:i + batch_size]
x = X.take(indices, axis=0)
t = Y.take(indices, axis=0)
y = model.forward(x)
loss = chx.softmax_cross_entropy(y, t)
loss.backward()
model.update(lr=0.01)
it += 1
if args.iteration is not None:
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
.format(it, mean_loss, accuracy, elapsed_time))
if it >= args.iteration:
is_finished = True
break
epoch += 1
if args.iteration is None: # stop based on epoch, instead of iteration
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
.format(epoch, mean_loss, accuracy, elapsed_time))
if epoch >= args.epoch:
is_finished = True
| def main():
parser = argparse.ArgumentParser('Train a neural network on MNIST dataset')
parser.add_argument(
'--batchsize', '-B', type=int, default=100, help='Batch size')
parser.add_argument(
'--epoch', '-E', type=int, default=20,
help='Number of epochs to train')
parser.add_argument(
'--iteration', '-I', type=int, default=None,
help='Number of iterations to train. Epoch is ignored if specified.')
parser.add_argument(
'--data', '-p', default='mnist',
help='Path to the directory that contains MNIST dataset')
parser.add_argument(
'--device', '-d', default='native', help='Device to use')
parser.add_argument(
'--eval-size', default=None, type=int,
help='Number of samples to use from the test set for evaluation. '
'None to use all.')
args = parser.parse_args()
chx.set_default_device(args.device)
# Prepare dataset
X, Y = get_mnist(args.data, 'train')
X_test, Y_test = get_mnist(args.data, 't10k')
# Prepare model
model = MLP()
# Training
N = X.shape[0] # TODO(beam2d): implement len
# TODO(beam2d): support int32 indexing
all_indices_np = np.arange(N, dtype=np.int64)
batch_size = args.batchsize
eval_size = args.eval_size
# Train
model.require_grad()
it = 0
epoch = 0
is_finished = False
start = time.time()
while not is_finished:
# TODO(beam2d): not supported in chx
np.random.shuffle(all_indices_np)
all_indices = chx.array(all_indices_np)
for i in range(0, N, batch_size):
indices = all_indices[i:i + batch_size]
x = X.take(indices, axis=0)
t = Y.take(indices, axis=0)
y = model.forward(x)
loss = chx.softmax_cross_entropy(y, t).mean()
loss.backward()
model.update(lr=0.01)
it += 1
if args.iteration is not None:
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
.format(it, mean_loss, accuracy, elapsed_time))
if it >= args.iteration:
is_finished = True
break
epoch += 1
if args.iteration is None: # stop based on epoch, instead of iteration
mean_loss, accuracy = evaluate(
model, X_test, Y_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
.format(epoch, mean_loss, accuracy, elapsed_time))
if epoch >= args.epoch:
is_finished = True
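The ``evaluate`` helper referenced in the loop above is not included in this row; a plausible reconstruction (assuming ChainerX's ``chx`` namespace as imported in the script) computes mean loss and accuracy over the test set without tracking gradients:
def evaluate(model, X_test, Y_test, eval_size, batch_size):
    # use at most eval_size test samples when it is given
    N_test = X_test.shape[0] if eval_size is None else min(eval_size, X_test.shape[0])
    with chx.no_backprop_mode():
        total_loss = chx.array(0, dtype=chx.float32)
        num_correct = chx.array(0, dtype=chx.int64)
        for i in range(0, N_test, batch_size):
            x = X_test[i:min(i + batch_size, N_test)]
            t = Y_test[i:min(i + batch_size, N_test)]
            y = model.forward(x)
            total_loss += chx.softmax_cross_entropy(y, t).sum()
            num_correct += (y.argmax(axis=1).astype(t.dtype) == t).astype(chx.int32).sum()
    return float(total_loss) / N_test, int(num_correct) / N_test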
|
52,237 | def app_ssowatconf():
"""
Regenerate SSOwat configuration file
"""
from yunohost.domain import domain_list, _get_maindomain, domain_config_get
from yunohost.permission import user_permission_list
main_domain = _get_maindomain()
domains = domain_list()["domains"]
all_permissions = user_permission_list(
full=True, ignore_system_perms=True, absolute_urls=True
)["permissions"]
permissions = {
"core_skipped": {
"users": [],
"label": "Core permissions - skipped",
"show_tile": False,
"auth_header": False,
"public": True,
"uris": [domain + "/yunohost/admin" for domain in domains]
+ [domain + "/yunohost/api" for domain in domains]
+ [
"re:^[^/]*/%.well%-known/ynh%-diagnosis/.*$",
"re:^[^/]*/%.well%-known/acme%-challenge/.*$",
"re:^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$",
],
}
}
redirected_regex = {
main_domain + r"/yunohost[\/]?$": "https://" + main_domain + "/yunohost/sso/"
}
redirected_urls = {}
for app in _installed_apps():
app_settings = read_yaml(APPS_SETTING_PATH + app + "/settings.yml")
# Redirected
redirected_urls.update(app_settings.get("redirected_urls", {}))
redirected_regex.update(app_settings.get("redirected_regex", {}))
for domain in domains:
default_app = domain_config_get(domain, "feature.app.default_app")
if (default_app != "_none"):
app_settings = _get_app_settings(default_app)
app_domain = app_settings["domain"]
app_path = app_settings["path"]
redirected_urls[domain + "/"] = app_domain + app_path
# New permission system
for perm_name, perm_info in all_permissions.items():
uris = (
[]
+ ([perm_info["url"]] if perm_info["url"] else [])
+ perm_info["additional_urls"]
)
# Ignore permissions for which there's no url defined
if not uris:
continue
permissions[perm_name] = {
"users": perm_info["corresponding_users"],
"label": perm_info["label"],
"show_tile": perm_info["show_tile"]
and perm_info["url"]
and (not perm_info["url"].startswith("re:")),
"auth_header": perm_info["auth_header"],
"public": "visitors" in perm_info["allowed"],
"uris": uris,
}
conf_dict = {
"portal_domain": main_domain,
"portal_path": "/yunohost/sso/",
"additional_headers": {
"Auth-User": "uid",
"Remote-User": "uid",
"Name": "cn",
"Email": "mail",
},
"domains": domains,
"redirected_urls": redirected_urls,
"redirected_regex": redirected_regex,
"permissions": permissions,
}
write_to_json("/etc/ssowat/conf.json", conf_dict, sort_keys=True, indent=4)
from .utils.legacy import translate_legacy_rules_in_ssowant_conf_json_persistent
translate_legacy_rules_in_ssowant_conf_json_persistent()
logger.debug(m18n.n("ssowat_conf_generated"))
| def app_ssowatconf():
"""
Regenerate SSOwat configuration file
"""
from yunohost.domain import domain_list, _get_maindomain, domain_config_get
from yunohost.permission import user_permission_list
main_domain = _get_maindomain()
domains = domain_list()["domains"]
all_permissions = user_permission_list(
full=True, ignore_system_perms=True, absolute_urls=True
)["permissions"]
permissions = {
"core_skipped": {
"users": [],
"label": "Core permissions - skipped",
"show_tile": False,
"auth_header": False,
"public": True,
"uris": [domain + "/yunohost/admin" for domain in domains]
+ [domain + "/yunohost/api" for domain in domains]
+ [
"re:^[^/]*/%.well%-known/ynh%-diagnosis/.*$",
"re:^[^/]*/%.well%-known/acme%-challenge/.*$",
"re:^[^/]*/%.well%-known/autoconfig/mail/config%-v1%.1%.xml.*$",
],
}
}
redirected_regex = {
main_domain + r"/yunohost[\/]?$": "https://" + main_domain + "/yunohost/sso/"
}
redirected_urls = {}
for app in _installed_apps():
app_settings = read_yaml(APPS_SETTING_PATH + app + "/settings.yml")
# Redirected
redirected_urls.update(app_settings.get("redirected_urls", {}))
redirected_regex.update(app_settings.get("redirected_regex", {}))
for domain in domains:
default_app = domain_config_get(domain, "feature.app.default_app")
if default_app != "_none" and _is_installed(default_app):
app_settings = _get_app_settings(default_app)
app_domain = app_settings["domain"]
app_path = app_settings["path"]
redirected_urls[domain + "/"] = app_domain + app_path
# New permission system
for perm_name, perm_info in all_permissions.items():
uris = (
[]
+ ([perm_info["url"]] if perm_info["url"] else [])
+ perm_info["additional_urls"]
)
# Ignore permissions for which there's no url defined
if not uris:
continue
permissions[perm_name] = {
"users": perm_info["corresponding_users"],
"label": perm_info["label"],
"show_tile": perm_info["show_tile"]
and perm_info["url"]
and (not perm_info["url"].startswith("re:")),
"auth_header": perm_info["auth_header"],
"public": "visitors" in perm_info["allowed"],
"uris": uris,
}
conf_dict = {
"portal_domain": main_domain,
"portal_path": "/yunohost/sso/",
"additional_headers": {
"Auth-User": "uid",
"Remote-User": "uid",
"Name": "cn",
"Email": "mail",
},
"domains": domains,
"redirected_urls": redirected_urls,
"redirected_regex": redirected_regex,
"permissions": permissions,
}
write_to_json("/etc/ssowat/conf.json", conf_dict, sort_keys=True, indent=4)
from .utils.legacy import translate_legacy_rules_in_ssowant_conf_json_persistent
translate_legacy_rules_in_ssowant_conf_json_persistent()
logger.debug(m18n.n("ssowat_conf_generated"))
|
566 | def _get_feature_flag_items(domain, couch_user):
user_is_admin = couch_user.is_domain_admin(domain)
from corehq.apps.domain.views.fixtures import LocationFixtureConfigView
feature_flag_items = []
if user_is_admin and toggles.SYNC_SEARCH_CASE_CLAIM.enabled(domain):
feature_flag_items.append({
'title': _('Case Search'),
'url': reverse('case_search_config', args=[domain])
})
if user_is_admin and toggles.HIERARCHICAL_LOCATION_FIXTURE.enabled(domain):
feature_flag_items.append({
'title': _('Location Fixture'),
'url': reverse(LocationFixtureConfigView.urlname, args=[domain])
})
# DEPRECATED: only show this is the domain does not have release_management access
can_access_linked_domains = (
user_is_admin and toggles.LINKED_DOMAINS.enabled(domain)
and not can_domain_access_release_management(domain)
)
if can_access_linked_domains:
feature_flag_items.append({
'title': _('Linked Project Spaces'),
'url': reverse('domain_links', args=[domain])
})
feature_flag_items.append({
'title': _('Linked Project Space History'),
'url': reverse('domain_report_dispatcher', args=[domain, 'project_link_report'])
})
from corehq.apps.registry.utils import RegistryPermissionCheck
permission_check = RegistryPermissionCheck(domain, couch_user)
if toggles.DATA_REGISTRY.enabled(domain) and permission_check.can_manage_some:
feature_flag_items.append({
'title': _('Data Registries'),
'url': reverse('data_registries', args=[domain]),
'subpages': [
{
'title': _("Manage Registry"),
'urlname': "manage_registry",
},
],
})
return feature_flag_items
| def _get_feature_flag_items(domain, couch_user):
user_is_admin = couch_user.is_domain_admin(domain)
from corehq.apps.domain.views.fixtures import LocationFixtureConfigView
feature_flag_items = []
if user_is_admin and toggles.SYNC_SEARCH_CASE_CLAIM.enabled(domain):
feature_flag_items.append({
'title': _('Case Search'),
'url': reverse('case_search_config', args=[domain])
})
if user_is_admin and toggles.HIERARCHICAL_LOCATION_FIXTURE.enabled(domain):
feature_flag_items.append({
'title': _('Location Fixture'),
'url': reverse(LocationFixtureConfigView.urlname, args=[domain])
})
# DEPRECATED: only show this if the domain does not have release_management access
can_access_linked_domains = (
user_is_admin and toggles.LINKED_DOMAINS.enabled(domain)
and not can_domain_access_release_management(domain)
)
if can_access_linked_domains:
feature_flag_items.append({
'title': _('Linked Project Spaces'),
'url': reverse('domain_links', args=[domain])
})
feature_flag_items.append({
'title': _('Linked Project Space History'),
'url': reverse('domain_report_dispatcher', args=[domain, 'project_link_report'])
})
from corehq.apps.registry.utils import RegistryPermissionCheck
permission_check = RegistryPermissionCheck(domain, couch_user)
if toggles.DATA_REGISTRY.enabled(domain) and permission_check.can_manage_some:
feature_flag_items.append({
'title': _('Data Registries'),
'url': reverse('data_registries', args=[domain]),
'subpages': [
{
'title': _("Manage Registry"),
'urlname': "manage_registry",
},
],
})
return feature_flag_items
|
25,578 | def queue_channel_open(
nodeaddress_to_channelopenqueue: OpenQueue,
nodeaddress_to_channeldepositqueue: DepositQueue,
channel: Dict,
token_address: str,
node_to_address: Dict,
node_to_endpoint: Dict,
) -> None:
node1 = channel["node1"]
node2 = channel["node2"]
participant1 = node_to_address[node1]
participant2 = node_to_address[node2]
minimum_capacity1 = channel["minimum_capacity1"]
minimum_capacity2 = channel["minimum_capacity2"]
is_node1_with_less_work = len(nodeaddress_to_channelopenqueue[participant1]) < len(
nodeaddress_to_channelopenqueue[participant2]
)
if is_node1_with_less_work:
channel_new = ChannelNew(
token_address=token_address,
participant=participant1,
partner=participant2,
endpoint=node_to_endpoint[node1],
minimum_capacity=minimum_capacity1,
)
nodeaddress_to_channelopenqueue[participant1].append(channel_new)
log.info(f"Queueing {channel_new}")
channel_deposit = ChannelDeposit(
token_address=token_address,
partner=participant1,
endpoint=node_to_endpoint[node2],
minimum_capacity=minimum_capacity1,
)
nodeaddress_to_channeldepositqueue[(token_address, participant2)].append(channel_deposit)
log.info(f"Queueing {channel_deposit}")
else:
channel_new = ChannelNew(
token_address=token_address,
participant=participant2,
partner=participant1,
endpoint=node_to_endpoint[node2],
minimum_capacity=minimum_capacity2,
)
nodeaddress_to_channelopenqueue[participant2].append(channel_new)
log.info(f"Queueing {channel_new}")
channel_deposit = ChannelDeposit(
token_address=token_address,
partner=participant2,
endpoint=node_to_endpoint[node1],
minimum_capacity=minimum_capacity1,
)
nodeaddress_to_channeldepositqueue[(token_address, participant1)].append(channel_deposit)
log.info(f"Queueing {channel_deposit}")
| def queue_channel_open(
nodeaddress_to_channelopenqueue: OpenQueue,
nodeaddress_to_channeldepositqueue: DepositQueue,
channel: Dict,
token_address: str,
node_to_address: Dict,
node_to_endpoint: Dict,
) -> None:
node1 = channel["node1"]
node2 = channel["node2"]
participant1 = node_to_address[node1]
participant2 = node_to_address[node2]
minimum_capacity1 = channel["minimum_capacity1"]
minimum_capacity2 = channel["minimum_capacity2"]
is_node1_with_less_work = len(nodeaddress_to_channelopenqueue[participant1]) < len(
nodeaddress_to_channelopenqueue[participant2]
)
if is_node1_with_less_work:
channel_new = ChannelNew(
token_address=token_address,
participant=participant1,
partner=participant2,
endpoint=node_to_endpoint[node1],
minimum_capacity=minimum_capacity1,
)
nodeaddress_to_channelopenqueue[participant1].append(channel_new)
log.info(f"Queueing {channel_new}")
channel_deposit = ChannelDeposit(
token_address=token_address,
partner=participant1,
endpoint=node_to_endpoint[node2],
minimum_capacity=minimum_capacity2,
)
nodeaddress_to_channeldepositqueue[(token_address, participant2)].append(channel_deposit)
log.info(f"Queueing {channel_deposit}")
else:
channel_new = ChannelNew(
token_address=token_address,
participant=participant2,
partner=participant1,
endpoint=node_to_endpoint[node2],
minimum_capacity=minimum_capacity2,
)
nodeaddress_to_channelopenqueue[participant2].append(channel_new)
log.info(f"Queueing {channel_new}")
channel_deposit = ChannelDeposit(
token_address=token_address,
partner=participant2,
endpoint=node_to_endpoint[node1],
minimum_capacity=minimum_capacity1,
)
nodeaddress_to_channeldepositqueue[(token_address, participant1)].append(channel_deposit)
log.info(f"Queueing {channel_deposit}")
|