import numpy as np
from scipy.optimize import minimize
from scipy.linalg import null_space
from pymoo.model.algorithm import Algorithm
from pymoo.model.duplicate import DefaultDuplicateElimination
from pymoo.model.individual import Individual
from pymoo.model.initialization import Initialization
from multiprocessing import Process, Queue, cpu_count
import sys
from .buffer import get_buffer
from .utils import propose_next_batch, propose_next_batch_without_label, get_sample_num_from_families
from ..solver import Solver
def _local_optimization(x, y, f, eval_func, bounds, delta_s):
'''
Local optimization of generated stochastic samples by minimizing distance to the target, see section 6.2.3.
Input:
x: a design sample, shape = (n_var,)
y: performance of x, shape = (n_obj,)
f: relative performance to the buffer origin, shape = (n_obj,)
eval_func: problem's evaluation function
bounds: problem's lower and upper bounds, shape = (2, n_var)
delta_s: scaling factor for choosing reference point in local optimization, see section 6.2.3
Output:
x_opt: locally optimized sample x
'''
# choose reference point z
f_norm = np.linalg.norm(f)
s = 2.0 * f / np.sum(f) - 1 - f / f_norm
s /= np.linalg.norm(s)
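# the reference point z is offset from y by delta_s * ||f|| along the unit direction s computed above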
z = y + s * delta_s * np.linalg.norm(f)
# optimization objective, see eq(4)
def fun(x):
fx = eval_func(x, return_values_of=['F'])
return np.linalg.norm(fx - z)
# jacobian of the objective
dy = eval_func(x, return_values_of=['dF'])
if dy is None:
jac = None
else:
def jac(x):
fx, dfx = eval_func(x, return_values_of=['F', 'dF'])
return ((fx - z) / np.linalg.norm(fx - z)) @ dfx
# do optimization using LBFGS
res = minimize(fun, x, method='L-BFGS-B', jac=jac, bounds=np.array(bounds).T)
x_opt = res.x
return x_opt
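# Hedged usage sketch (hypothetical names, not part of this module): given a problem object whose
# evaluate() follows the eval_func convention above and whose bounds are shaped (2, n_var),
#   x_opt = _local_optimization(x, y, y - buffer_origin, problem.evaluate, bounds, delta_s=0.1)
# would return the locally refined design sample.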
def _get_kkt_dual_variables(F, G, DF, DG):
'''
Optimizing for dual variables alpha and beta in KKT conditions, see section 4.2, proposition 4.5.
Input:
Given a design sample,
F: performance value, shape = (n_obj,)
G: active constraints, shape = (n_active_const,)
DF: jacobian matrix of performance, shape = (n_obj, n_var)
DG: jacobian matrix of active constraints, shape = (n_active_const, n_var)
where n_var = D, n_obj = d, n_active_const = K' in the original paper
Output:
alpha_opt, beta_opt: optimized dual variables
'''
# NOTE: use min-norm solution for solving alpha then determine beta instead?
n_obj = len(F)
n_active_const = len(G) if G is not None else 0
'''
Optimization formulation:
To optimize the last line of (2) in section 4.2, we turn it into a quadratic optimization problem:
find x such that Ax = 0 --> min_x ||Ax||^2
where x stands for [alpha, beta] and A for [DF, DG].
Constraints: alpha >= 0, beta >= 0, sum(alpha) = 1.
NOTE: we currently ignore the constraint beta * G = 0 because G is always 0 with only box constraints, but adding that constraint results in a poor optimization solution (?)
'''
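# concretely, the code below minimizes 0.5 * ||alpha @ DF + beta @ DG||^2 (the ||Ax||^2 above)
# subject to alpha >= 0, beta >= 0 and sum(alpha) = 1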
if n_active_const > 0: # when there are active constraints
def fun(x, n_obj=n_obj, DF=DF, DG=DG):
alpha, beta = x[:n_obj], x[n_obj:]
objective = alpha @ DF + beta @ DG
return 0.5 * objective @ objective
def jac(x, n_obj=n_obj, DF=DF, DG=DG):
alpha, beta = x[:n_obj], x[n_obj:]
objective = alpha @ DF + beta @ DG
return np.vstack([DF, DG]) @ objective
const = {'type': 'eq',
'fun': lambda x, n_obj=n_obj: np.sum(x[:n_obj]) - 1.0,
'jac': lambda x, n_obj=n_obj: np.concatenate([np.ones(n_obj), np.zeros_like(x[n_obj:])])}
else: # when there's no active constraint
def fun(x, DF=DF):
objective = x @ DF
return 0.5 * objective @ objective
def jac(x, DF=DF):
objective = x @ DF
return DF @ objective
const = {'type': 'eq',
'fun': lambda x: np.sum(x) - 1.0,
'jac': np.ones_like}
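# np.ones_like doubles as the constraint jacobian here: x holds only alpha in this branch,
# and d/dx (sum(x) - 1) is a vector of ones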
# specify different bounds for alpha and beta
bounds = np.array([[0.0, np.inf]] * (n_obj + n_active_const))
# NOTE: we use random values to initialize alpha for now; taking the location of F into account might give a more accurate initialization
alpha_init = np.random.random(len(F))
alpha_init /= np.sum(alpha_init)
beta_init = np.zeros(n_active_const) # zero initialization for beta
x_init = np.concatenate([alpha_init, beta_init])
# do optimization using SLSQP
res = minimize(fun, x_init, method='SLSQP', jac=jac, bounds=bounds, constraints=const)
x_opt = res.x
alpha_opt, beta_opt = x_opt[:n_obj], x_opt[n_obj:]
return alpha_opt, beta_opt
def _get_active_box_const(x, bounds):
'''
Getting the indices of active box constraints.
Input:
x: a design sample, shape = (n_var,)
bounds: problem's lower and upper bounds, shape = (2, n_var)
Output:
active_idx: indices of all active constraints
upper_active_idx: indices of upper active constraints
lower_active_idx: indices of lower active constraints
'''
eps = 1e-8 # epsilon value to determine 'active'
upper_active = bounds[1] - x < eps
lower_active = x - bounds[0] < eps
active = np.logical_or(upper_active, lower_active)
active_idx, upper_active_idx, lower_active_idx = np.where(active)[0], np.where(upper_active)[0], np.where(lower_active)[0]
return active_idx, upper_active_idx, lower_active_idx
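# Hedged example (made-up values): with bounds = np.array([[0., 0.], [1., 1.]]) and
# x = np.array([0., 0.5]), only x[0] sits on its lower bound, so the function returns
# roughly (array([0]), array([]), array([0]))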
def _get_box_const_value_jacobian_hessian(x, bounds):
'''
Getting the value, jacobian and hessian of active box constraints.
Input:
x: a design sample, shape = (n_var,)
bounds: problem's lower and upper bounds, shape = (2, n_var)
Output:
G: value of active box constraints (always 0), shape = (n_active_const,)
DG: jacobian matrix of active box constraints (1/-1 at active locations, otherwise 0), shape = (n_active_const, n_var)
HG: hessian matrix of active box constraints (always 0), shape = (n_active_const, n_var, n_var)
'''
# get indices of active constraints
active_idx, upper_active_idx, _ = _get_active_box_const(x, bounds)
n_active_const, n_var = len(active_idx), len(x)
if n_active_const > 0:
G = np.zeros(n_active_const)
DG = np.zeros((n_active_const, n_var))
for i, idx in enumerate(active_idx):
constraint = np.zeros(n_var)
if idx in upper_active_idx:
constraint[idx] = 1 # upper active
else:
constraint[idx] = -1 # lower active
DG[i] = constraint
HG = np.zeros((n_active_const, n_var, n_var))
return G, DG, HG
else:
# no active constraints
return None, None, None
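# e.g. for the sketch under _get_active_box_const (x[0] on its lower bound, n_var = 2):
# G = [0.], DG = [[-1., 0.]] and HG is a single 2x2 block of zeros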
def _get_optimization_directions(x_opt, eval_func, bounds):
'''
Getting the directions to explore the local Pareto manifold.
Input:
x_opt: locally optimized design sample, shape = (n_var,)
eval_func: problem's evaluation function
bounds: problem's lower and upper bounds, shape = (2, n_var)
Output:
directions: local exploration directions for alpha, beta and x (design sample)
'''
# evaluate the value, jacobian and hessian of performance
F, DF, HF = eval_func(x_opt, return_values_of=['F', 'dF', 'hF'])
# evaluate the value, jacobian and hessian of box constraint (NOTE: assume no other types of constraints)
G, DG, HG = _get_box_const_value_jacobian_hessian(x_opt, bounds)
# KKT dual variables optimization
alpha, beta = _get_kkt_dual_variables(F, G, DF, DG)
n_obj, n_var, n_active_const = len(F), len(x_opt), len(G) if G is not None else 0
# compute H in eq(3) (NOTE: the two forms below are equivalent for box constraint since HG = 0)
if n_active_const > 0:
H = HF.T @ alpha + HG.T @ beta
else:
H = HF.T @ alpha
# compute exploration directions (unnormalized) by taking the null space of image in eq(3)
# TODO: this part is mainly copied from Adriana's implementation, to be checked
# NOTE: it seems unnecessary to solve for d_alpha and d_beta; we may need to consider all possible situations in the null_space computation
alpha_const = np.concatenate([np.ones(n_obj), np.zeros(n_active_const + n_var)])
if n_active_const > 0:
comp_slack_const = np.column_stack([np.zeros((n_active_const, n_obj + n_active_const)), DG])
DxHx = np.vstack([alpha_const, comp_slack_const, np.column_stack([DF.T, DG.T, H])])
else:
DxHx = np.vstack([alpha_const, np.column_stack([DF.T, H])])
directions = null_space(DxHx)
# eliminate numerical error
eps = 1e-8
directions[np.abs(directions) < eps] = 0.0
return directions
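# NOTE: the rows of `directions` are ordered as d_alpha (first n_obj), d_beta (next n_active_const)
# and d_x (last n_var); each column is one exploration direction, and _first_order_approximation
# below slices it accordingly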
def _first_order_approximation(x_opt, directions, bounds, n_grid_sample):
'''
Exploring new samples from the local manifold (first-order approximation of the Pareto front).
Input:
x_opt: locally optimized design sample, shape = (n_var,)
directions: local exploration directions for alpha, beta and x (design sample)
bounds: problem's lower and upper bounds, shape = (2, n_var)
n_grid_sample: number of samples on local manifold (grid), see section 6.3.1
Output:
x_samples: new valid samples from local manifold (grid)
'''
n_var = len(x_opt)
lower_bound, upper_bound = bounds[0], bounds[1]
active_idx, _, _ = _get_active_box_const(x_opt, bounds)
n_active_const = len(active_idx)
n_obj = len(directions) - n_var - n_active_const
x_samples = np.array([x_opt])
# TODO: check why d_alpha and d_beta are unused here
d_alpha, d_beta, d_x = directions[:n_obj], directions[n_obj:n_obj + n_active_const], directions[-n_var:]
eps = 1e-8
if np.linalg.norm(d_x) < eps: # direction is a zero vector
return x_samples
direction_dim = d_x.shape[1]
if direction_dim > n_obj - 1:
# more than d-1 directions to explore, randomly choose d-1 sub-directions
indices = np.random.choice(np.arange(direction_dim), n_obj - 1)
while np.linalg.norm(d_x[:, indices]) < eps:
indices = np.random.choice(np.arange(direction_dim), n_obj - 1)
d_x = d_x[:, indices]
elif direction_dim < n_obj - 1:
# less than d-1
-121.61 59.0 200 0 | -0.81 0.33 -40.64 0.79
| UsedTime: 132 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
1 1.60e+03-1267.96 |-1267.96 329.7 200 0 | -2.67 0.88 0.56 1.00
1 8.48e+04 -171.79 | -182.24 63.3 200 0 | -0.30 0.32 -30.75 0.64
1 1.19e+05 -171.79 | -178.25 116.8 200 0 | -0.31 0.16 -22.52 0.43
1 1.34e+05 -164.56 | -164.56 99.1 200 0 | -0.31 0.15 -18.09 0.35
1 1.47e+05 -135.20 | -135.20 92.1 200 0 | -0.31 0.14 -15.65 0.29
| UsedTime: 783 |
"""
args = Arguments(AgentModSAC, env)
args.reward_scale = 2 ** -1 # RewardRange: -1800 < -200 < -50 < 0
args.gamma = 0.97
args.target_step = args.max_step * 2
args.eval_times = 2 ** 3
elif env_name == 'LunarLanderContinuous-v2':
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 4.25e+03 -143.93 | -143.93 29.6 69 12 | -2.47 1.06 0.13 0.15
2 1.05e+05 170.35 | 170.35 57.9 645 177 | 0.06 1.59 15.93 0.20
2 1.59e+05 170.35 | 80.46 125.0 775 285 | 0.07 1.14 29.92 0.29
2 1.95e+05 221.39 | 221.39 19.7 449 127 | 0.12 1.09 32.16 0.40
| UsedTime: 421 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
1 4.26e+03 -139.77 | -139.77 36.7 67 12 | -2.16 11.20 0.12 0.15
1 1.11e+05 -105.09 | -105.09 84.3 821 244 | -0.14 27.60 1.04 0.21
1 2.03e+05 -15.21 | -15.21 22.7 1000 0 | -0.01 17.96 36.95 0.45
1 3.87e+05 59.39 | 54.09 160.7 756 223 | 0.00 16.57 88.99 0.73
1 4.03e+05 59.39 | 56.16 103.5 908 120 | 0.06 16.47 84.27 0.71
1 5.10e+05 186.59 | 186.59 103.6 547 257 | -0.02 12.72 67.97 0.57
1 5.89e+05 226.93 | 226.93 20.0 486 154 | 0.13 9.27 68.29 0.51
| UsedTime: 3407 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
1 4.15e+03 -169.01 | -169.01 87.9 110 59 | -2.18 11.86 0.10 0.15
1 1.09e+05 -84.47 | -84.47 80.1 465 293 | -0.30 30.64 -6.29 0.20
1 4.25e+05 -8.33 | -8.33 48.4 994 26 | 0.07 13.51 76.99 0.62
1 4.39e+05 87.29 | 87.29 86.9 892 141 | 0.04 12.76 70.37 0.61
1 5.57e+05 159.17 | 159.17 65.7 721 159 | 0.10 10.31 59.90 0.51
1 5.87e+05 190.09 | 190.09 71.7 577 175 | 0.09 9.45 61.74 0.48
1 6.20e+05 206.74 | 206.74 29.1 497 108 | 0.09 9.21 62.06 0.47
| UsedTime: 4433 |
"""
# env = gym.make('LunarLanderContinuous-v2')
# get_gym_env_args(env=env, if_print=True)
env_func = gym.make
env_args = {'env_num': 1,
'env_name': 'LunarLanderContinuous-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': False,
'target_return': 200,
'id': 'LunarLanderContinuous-v2'}
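# NOTE: the env_args dict above presumably mirrors what the commented-out
# get_gym_env_args(env, if_print=True) call prints for this environment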
args = Arguments(AgentModSAC, env_func=env_func, env_args=env_args)
args.target_step = args.max_step
args.gamma = 0.99
args.eval_times = 2 ** 5
elif env_name == 'BipedalWalker-v3':
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 7.51e+03 -111.59 | -111.59 0.2 97 7 | -0.18 4.23 -0.03 0.02
3 1.48e+05 -110.19 | -110.19 1.6 84 30 | -0.59 2.46 3.18 0.03
3 5.02e+05 -31.84 | -102.27 54.0 1359 335 | -0.06 0.85 2.84 0.04
3 1.00e+06 -7.94 | -7.94 73.2 411 276 | -0.17 0.72 1.96 0.03
3 1.04e+06 131.50 | 131.50 168.3 990 627 | 0.06 0.46 1.69 0.04
3 1.11e+06 214.12 | 214.12 146.6 1029 405 | 0.09 0.50 1.63 0.04
3 1.20e+06 308.34 | 308.34 0.7 1106 20 | 0.29 0.72 4.56 0.05
| UsedTime: 8611 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 6.75e+03 -92.44 | -92.44 0.2 120 3 | -0.18 1.94 -0.00 0.02
3 3.95e+05 -37.16 | -37.16 9.2 1600 0 | -0.06 1.90 4.20 0.07
3 6.79e+05 -23.32 | -42.54 90.0 1197 599 | -0.02 0.91 1.57 0.04
3 6.93e+05 46.92 | 46.92 96.9 808 395 | -0.04 0.57 1.34 0.04
3 8.38e+05 118.86 | 118.86 154.5 999 538 | 0.14 1.44 0.75 0.05
3 1.00e+06 225.56 | 225.56 124.1 1207 382 | 0.13 0.72 4.75 0.06
3 1.02e+06 283.37 | 283.37 86.3 1259 245 | 0.14 0.80 3.96 0.06
3 1.19e+06 313.36 | 313.36 0.9 1097 20 | 0.21 0.78 6.80 0.06
| UsedTime: 9354 | SavedDir: ./BipedalWalker-v3_ModSAC_3
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
3 6.55e+03 -109.86 | -109.86 4.5 156 30 | -0.06 0.71 -0.01 0.02
3 1.24e+05 -88.28 | -88.28 26.2 475 650 | -0.15 0.15 0.04 0.02
3 3.01e+05 -47.89 | -56.76 21.7 1341 540 | -0.03 0.19 -2.76 0.05
3 3.82e+05 80.89 | 53.79 140.1 983 596 | -0.01 0.18 0.46 0.05
3 4.35e+05 137.70 | 28.54 104.7 936 581 | -0.01 0.21 0.63 0.06
3 4.80e+05 158.71 | 25.54 114.7 524 338 | 0.18 0.17 6.17 0.06
3 5.31e+05 205.81 | 203.27 143.9 1048 388 | 0.14 0.15 4.00 0.06
3 6.93e+05 254.40 | 252.74 121.1 992 280 | 0.21 0.12 7.34 0.06
3 7.11e+05 304.79 | 304.79 73.4 1015 151 | 0.21 0.12 5.69 0.06
| UsedTime: 3215 |
"""
env_func = gym.make
env_args = {'env_num': 1,
'env_name': 'BipedalWalker-v3',
'max_step': 1600,
'state_dim': 24,
'action_dim': 4,
'if_discrete': False,
'target_return': 300,
'id': 'BipedalWalker-v3', }
args = Arguments(AgentModSAC, env_func=env_func, env_args=env_args)
args.target_step = args.max_step
args.gamma = 0.98
args.eval_times = 2 ** 4
else:
raise ValueError('env_name:', env_name)
args.learner_gpus = gpu_id
args.random_seed += gpu_id
if_check = 0
if if_check:
train_and_evaluate(args)
else:
train_and_evaluate_mp(args)
def demo_continuous_action_on_policy():
env_name = ['Pendulum-v0',
'Pendulum-v1',
'LunarLanderContinuous-v2',
'BipedalWalker-v3'][ENV_ID]
gpu_id = GPU_ID # >=0 means GPU ID, -1 means CPU
if env_name in {'Pendulum-v0', 'Pendulum-v1'}:
env = PendulumEnv(env_name, target_return=-500)
"TotalStep: 1e5, TargetReward: -200, UsedTime: 600s"
args = Arguments(AgentPPO, env)
args.reward_scale = 2 ** -1 # RewardRange: -1800 < -200 < -50 < 0
args.gamma = 0.97
args.target_step = args.max_step * 8
args.eval_times = 2 ** 3
elif env_name == 'LunarLanderContinuous-v2':
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 8.40e+03 -167.99 | -167.99 119.9 96 13 | -1.40 8795.41 0.02 -0.50
2 1.27e+05 -167.99 | -185.92 44.3 187 77 | 0.07 396.60 0.02 -0.51
2 2.27e+05 191.79 | 191.79 83.7 401 96 | 0.16 39.93 0.06 -0.52
2 3.40e+05 220.93 | 220.93 87.7 375 99 | 0.19 121.32 -0.01 -0.53
| UsedTime: 418 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
2 8.31e+03 -90.85 | -90.85 49.2 72 12 | -1.29 5778.93 0.01 -0.50
2 1.16e+05 -90.85 | -126.58 92.2 312 271 | 0.03 215.40 -0.01 -0.50
2 1.96e+05 133.57 | 133.57 156.4 380 108 | 0.04 227.81 0.04 -0.51
2 3.85e+05 195.56 | 195.56 78.4 393 87 | 0.14 26.79 -0.05 -0.54
2 4.97e+05 212.20 | 212.20 90.5 383 72 | 0.18 357.67 -0.01 -0.55
| UsedTime: 681 |
"""
# env = gym.make('LunarLanderContinuous-v2')
# get_gym_env_args(env=env, if_print=True)
env_func = gym.make
env_args = {'env_num': 1,
'env_name': 'LunarLanderContinuous-v2',
'max_step': 1000,
'state_dim': 8,
'action_dim': 2,
'if_discrete': False,
'target_return': 200,
'id': 'LunarLanderContinuous-v2'}
args = Arguments(AgentPPO, env_func=env_func, env_args=env_args)
args.target_step = args.max_step * 2
args.gamma = 0.99
args.eval_times = 2 ** 5
elif env_name == 'BipedalWalker-v3':
"""
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
0 2.72e+04 -38.64 | -38.64 43.7 1236 630 | -0.11 83.06 -0.03 -0.50
0 4.32e+05 -30.57 | -30.57 4.7 1600 0 | -0.01 0.33 -0.06 -0.53
0 6.38e+05 179.12 | 179.12 5.2 1600 0 | 0.06 4.16 0.01 -0.57
0 1.06e+06 274.76 | 274.76 4.5 1600 0 | 0.12 1.11 0.03 -0.61
0 2.11e+06 287.37 | 287.37 46.9 1308 104 | 0.17 5.40 0.03 -0.72
0 2.33e+06 296.76 | 296.76 29.9 1191 30 | 0.20 2.86 0.00 -0.74
0 2.54e+06 307.66 | 307.66 1.9 1163 34 | 0.19 5.40 0.02 -0.75
| UsedTime: 1641 |
ID Step maxR | avgR stdR avgS stdS | expR objC etc.
4 2.88e+04 -112.06 | -112.06 0.1 128 8 |
raise TypeError(f"difference argument is not iterable, got {iterable!r}")
set_: typing.Set[T]
if isinstance(self._set, set):
set_ = self._set.difference(*iterables)
else:
set_ = set(self._set)
set_.difference_update(*iterables)
return type(self).from_iterable(set_, self.key)
@classmethod
def from_iterable(cls: Type[SortedKeySet[T]], iterable: Iterable[T], /, key: Callable[[T], Any]) -> SortedKeySet[T]:
if not isinstance(iterable, Iterable):
raise TypeError(f"from_iterable expects an iterable, got {iterable!r}")
elif not callable(key):
raise TypeError(f"from_iterable expects a callable key, got {key!r}")
else:
return cls.from_sorted(sorted(set(iterable), key=key), key)
@classmethod
@abstractmethod
def from_sorted(cls: Type[SortedKeySet[T]], iterable: Iterable[T], /, key: Callable[[T], Any]) -> SortedKeySet[T]:
raise NotImplementedError("from_sorted is a required method for sorted key sets")
def index(self: SortedKeySet[Any], value: Any, start: int = 0, stop: Optional[int] = None, /) -> int:
return self._sequence.index(value, start, stop)
def intersection(self: SortedKeySet[T], /, *iterables: Iterable[Any]) -> SortedKeySet[T]:
if len(iterables) == 0:
return self.copy()
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"intersection argument is not iterable, got {iterable!r}")
set_: typing.Set[T]
if isinstance(self._set, set):
set_ = self._set
else:
set_ = set(self._set)
return type(self).from_iterable(set_.intersection(*iterables), self.key)
def isdisjoint(self: SortedKeySet[Any], iterable: Iterable[Any], /) -> bool:
if isinstance(iterable, Iterable):
return self._set.isdisjoint(iterable)
else:
raise TypeError(f"isdisjoint argument is not iterable, got {iterable!r}")
def issubset(self: SortedKeySet[Any], iterable: Iterable[Any], /) -> bool:
if not isinstance(iterable, Iterable):
raise TypeError(f"issubset argument is not iterable, got {iterable!r}")
elif isinstance(iterable, set):
return iterable.issuperset(self)
elif isinstance(iterable, AbstractSet):
return all(x in iterable for x in self)
elif isinstance(self._set, set):
return len(self._set.intersection(iterable)) == len(self)
else:
return len({x for x in iterable if x in self}) == len(self)
def issuperset(self: SortedKeySet[Any], iterable: Iterable[Any], /) -> bool:
if not isinstance(iterable, Iterable):
raise TypeError(f"issuperset argument is not iterable, got {iterable!r}")
elif isinstance(self._set, set):
return self._set.issuperset(iterable)
else:
return all(x in self for x in iterable)
def symmetric_difference(self: SortedKeySet[T], /, *iterables: Iterable[S]) -> SortedKeySet[Union[T, S]]:
if len(iterables) == 0:
return cast(SortedKeySet[Union[T, S]], self.copy())
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"symmetric_difference argument is not iterable, got {iterable!r}")
set_: typing.Set[Union[T, S]]
if isinstance(self._set, set):
set_ = self._set.symmetric_difference(iterables[0])
else:
set_ = set(self._set)
set_.symmetric_difference_update(iterables[0])
for i in range(1, len(iterables)):
set_.symmetric_difference_update(iterables[i])
return type(self).from_iterable(set_, self.key) # type: ignore
def union(self: SortedKeySet[T], /, *iterables: Iterable[S]) -> SortedKeySet[Union[T, S]]:
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"union argument is not iterable, got {iterable!r}")
return type(self).from_iterable(chain(self, *iterables), self.key) # type: ignore
@property
def key(self: SortedKeySet[T], /) -> Callable[[T], Any]:
return self._sequence.key
@property
@abstractmethod
def _sequence(self: SortedKeySet[T], /) -> SortedKeySequence[T]:
raise NotImplementedError("_sequence is a required property of sorted key sets")
@property
@abstractmethod
def _set(self: SortedKeySet[T], /) -> AbstractSet[T]:
raise NotImplementedError("_set is a required property of sorted key sets")
class SortedMutableSet(SortedSet[T_co], MutableSet[T_co], ABC, Generic[T_co]):
__slots__ = ()
def __and__(self: SortedMutableSet[T_co], other: Iterable[Any], /) -> SortedMutableSet[T_co]:
if isinstance(other, AbstractSet):
return self.intersection(other)
else:
return NotImplemented
@overload
def __getitem__(self: SortedMutableSet[T_co], index: int, /) -> T_co:
...
@overload
def __getitem__(self: SortedMutableSet[T_co], index: slice, /) -> MutableSequence[T_co]:
...
def __getitem__(self, index, /):
return self._sequence[index]
def __iand__(self: SortedMutableSet[T_co], other: Iterable[Any], /) -> SortedMutableSet[T_co]:
if isinstance(other, Iterable):
self.intersection_update(other)
return self
else:
return NotImplemented
def __ior__(self: SortedMutableSet[T_co], other: Iterable[T], /) -> SortedMutableSet[Union[T_co, T]]:
if isinstance(other, Iterable):
self.update(cast(Iterable[T_co], other))
return cast(SortedMutableSet[Union[T_co, T]], self)
else:
return NotImplemented
def __iter__(self: SortedMutableSet[T_co], /) -> SortedIterator[T_co]:
return iter(self._sequence)
def __isub__(self: SortedMutableSet[T_co], other: Iterable[Any], /) -> SortedMutableSet[T_co]:
if isinstance(other, Iterable):
self.difference_update(other)
return self
else:
return NotImplemented
def __ixor__(self: SortedMutableSet[T_co], other: Iterable[T], /) -> SortedMutableSet[Union[T_co, T]]:
if isinstance(other, Iterable):
self.symmetric_difference_update(cast(Iterable[T_co], other))
return cast(SortedMutableSet[Union[T_co, T]], self)
else:
return NotImplemented
def __or__(self: SortedMutableSet[T_co], other: Iterable[T], /) -> SortedMutableSet[Union[T_co, T]]:
if isinstance(other, AbstractSet):
return cast(SortedMutableSet[Union[T_co, T]], self.union(other))
else:
return NotImplemented
__ror__ = __or__
def __rand__(self: SortedMutableSet[Any], other: Iterable[T], /) -> SortedMutableSet[T]:
if isinstance(other, AbstractSet):
return self.intersection(other)
else:
return NotImplemented
def __rsub__(self: SortedMutableSet[Any], other: Iterable[T], /) -> SortedMutableSet[T]:
if isinstance(other, AbstractSet):
import more_collections.sorted as mcs
set_ = mcs.SortedSet.from_iterable(other)
set_ -= self
return set_
else:
return NotImplemented
def __sub__(self: SortedMutableSet[T_co], other: Iterable[Any], /) -> SortedMutableSet[T_co]:
if isinstance(other, AbstractSet):
return self.difference(other)
else:
return NotImplemented
def __xor__(self: SortedMutableSet[T_co], other: Iterable[T], /) -> SortedMutableSet[Union[T_co, T]]:
if isinstance(other, AbstractSet):
return cast(SortedMutableSet[Union[T_co, T]], self.symmetric_difference(other))
else:
return NotImplemented
__rxor__ = __xor__
def add(self: SortedMutableSet[T], value: T, /) -> None:
len_ = len(self._set)
self._set.add(value)
if len(self._set) != len_:
self._sequence.append(value)
def clear(self: SortedMutableSet[Any], /) -> None:
self._sequence.clear()
self._set.clear()
def difference(self: SortedMutableSet[T_co], /, *iterables: Iterable[Any]) -> SortedMutableSet[T_co]:
return cast(SortedMutableSet[T_co], super().difference(*iterables))
def difference_update(self: SortedMutableSet[Any], /, *iterables: Iterable[Any]) -> None:
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"difference_update argument is not iterable, got {iterable!r}")
for iterable in iterables:
for x in iterable:
self.discard(x)
def discard(self: SortedMutableSet[Any], value: Any, /) -> None:
len_ = len(self._set)
self._set.discard(value)
if len(self._set) != len_:
self._sequence.remove(value)
@classmethod
def from_iterable(cls: Type[SortedMutableSet[T_co]], iterable: Iterable[T_co], /) -> SortedMutableSet[T_co]:
if isinstance(iterable, Iterable):
return cls.from_sorted(sorted(set(iterable))) # type: ignore
else:
raise TypeError(f"from_iterable expects an iterable, got {iterable!r}")
@classmethod
@abstractmethod
def from_sorted(cls: Type[SortedMutableSet[T]], iterable: Iterable[T], /) -> SortedMutableSet[T]:
raise NotImplementedError("from_sorted is a required method for sorted mutable sets")
def intersection(self: SortedMutableSet[T_co], /, *iterables: Iterable[Any]) -> SortedMutableSet[T_co]:
return cast(SortedMutableSet[T_co], super().intersection(*iterables))
def intersection_update(self: SortedMutableSet[Any], /, *iterables: Iterable[Any]) -> None:
if len(iterables) == 0:
self.clear()
return
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"intersection_update argument is not iterable, got {iterable!r}")
set_: typing.Set[T_co]
if isinstance(self._set, set):
set_ = self._set.intersection(*iterables)
else:
set_ = set(self._set)
set_.intersection_update(*iterables)
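# at this point set_ holds the elements common to self and every iterable; the symmetric
# difference with self._set below flips it into the elements of self that are NOT in every
# iterable, which difference_update then discards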
set_.symmetric_difference_update(self._set)
self.difference_update(set_)
def pop(self: SortedMutableSet[T_co], index: int = -1, /) -> T_co:
if not isinstance(index, SupportsIndex):
raise TypeError(f"pop could not interpret index as an integer, got {index!r}")
index = operator.index(index)
len_ = len(self._set)
if index < 0:
index += len_
if not 0 <= index < len_:
raise IndexError("index out of range")
value = self._sequence.pop(index)
self._set.remove(value)
return value
def remove(self: SortedMutableSet[Any], value: Any, /) -> None:
len_ = len(self._set)
self._set.discard(value)
if len(self._set) == len_:
raise KeyError(value)
self._sequence.remove(value)
def symmetric_difference(self: SortedMutableSet[T_co], /, *iterables: Iterable[T]) -> SortedMutableSet[Union[T_co, T]]:
return cast(SortedMutableSet[T_co], super().symmetric_difference(*iterables))
def symmetric_difference_update(self: SortedMutableSet[T_co], /, *iterables: Iterable[T_co]) -> None:
if len(iterables) == 0:
return
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"symmetric_difference_update argument is not iterable, got {iterable!r}")
set_: typing.Set[T_co] = set(iterables[0])
for i in range(1, len(iterables)):
set_.symmetric_difference_update(iterables[i])
for x in set_:
if x in self:
self.remove(x)
else:
self.add(x)
def union(self: SortedMutableSet[T_co], /, *iterables: Iterable[T]) -> SortedMutableSet[Union[T_co, T]]:
return cast(SortedMutableSet[T_co], super().union(*iterables))
def update(self: SortedMutableSet[T_co], /, *iterables: Iterable[T_co]) -> None:
for iterable in iterables:
if not isinstance(iterable, Iterable):
raise TypeError(f"update argument is not iterable, got {iterable!r}")
for iterable in iterables:
for x in iterable:
self.add(x)
@property
@abstractmethod
def _sequence(self: SortedMutableSet[T_co], /) -> SortedMutableSequence[T_co]:
raise NotImplementedError("_sequence is a required property of sorted mutable sets")
@property
@abstractmethod
def _set(self: SortedMutableSet[T_co], /) -> MutableSet[T_co]:
raise NotImplementedError("_set is a required property of sorted mutable sets")
class SortedKeyMutableSet(SortedKeySet[T_co], MutableSet[T_co], ABC, Generic[T_co]):
__slots__ = ()
def __and__(self: SortedKeyMutableSet[T_co], other: Iterable[Any], /) -> SortedKeyMutableSet[T_co]:
if isinstance(other, AbstractSet):
return self.intersection(other)
else:
return NotImplemented
@overload
def __getitem__(self: SortedKeyMutableSet[T_co], index: int, /) -> T_co:
...
@overload
def __getitem__(self: SortedKeyMutableSet[T_co], index: slice, /) -> MutableSequence[T_co]:
...
def __getitem__(self, index, /):
return self._sequence[index]
def __iand__(self: SortedKeyMutableSet[T_co], other: Iterable[Any], /) -> SortedKeyMutableSet[T_co]:
if isinstance(other, Iterable):
self.intersection_update(other)
return self
else:
return NotImplemented
def __ior__(self: SortedKeyMutableSet[T_co], other: Iterable[T], /) -> SortedKeyMutableSet[Union[T_co, T]]:
if isinstance(other, Iterable):
self.update(cast(Iterable[T_co], other))
return cast(SortedKeyMutableSet[Union[T_co, T]], self)
else:
return NotImplemented
def __isub__(self: SortedKeyMutableSet[T_co], other: Iterable[Any], /) -> SortedKeyMutableSet[T_co]:
if isinstance(other, Iterable):
self.difference_update(other)
return self
else:
return NotImplemented
def __iter__(self: SortedKeyMutableSet[T_co], /) -> SortedKeyIterator[T_co]:
return iter(self._sequence)
def __ixor__(self: SortedKeyMutableSet[T_co], other: Iterable[T], /) -> SortedKeyMutableSet[Union[T_co, T]]:
if isinstance(other, Iterable):
self.symmetric_difference_update(cast(Iterable[T_co], other))
return cast(SortedKeyMutableSet[Union[T_co, T]], self)
else:
return NotImplemented
def __or__(self: SortedKeyMutableSet[T_co], other: Iterable[T], /) -> SortedKeyMutableSet[Union[T_co, T]]:
if isinstance(other, AbstractSet):
return self.union(other)
else:
return NotImplemented
__ror__ = __or__
def __rand__(self: SortedKeyMutableSet[Any], other: Iterable[T], /) -> SortedKeyMutableSet[T]:
if isinstance(other, AbstractSet):
return self.intersection(other)
else:
return NotImplemented
def __rsub__(self: SortedKeyMutableSet[Any], other: Iterable[T], /) -> SortedKeyMutableSet[T]:
if isinstance(other, AbstractSet):
import more_collections.sorted as mcs
set_ = mcs.SortedSet.from_iterable(other)
set_ -= self
return set_
else:
return NotImplemented
def __sub__(self: SortedKeyMutableSet[T_co], other: Iterable[Any], /) -> SortedKeyMutableSet[T_co]:
if isinstance(other, AbstractSet):
return self.difference(other)
else:
return NotImplemented
def __xor__(self: SortedKeyMutableSet[T_co], other: Iterable[T], /) -> SortedKeyMutableSet[Union[T_co, T]]:
if isinstance(other, AbstractSet):
return self.symmetric_difference(other)
else:
return NotImplemented
__rxor__ = __xor__
def add(self: SortedKeyMutableSet[T], value: T, /) -> None:
len_
import sys
import re
import operator
import itertools
from functools import reduce  # reduce is not a builtin in Python 3; used by prod() below
from os import path
from FileTools import loadAirlineCallsigns
reOpenTag = re.compile('^<([a-z_]+)>$')
reOpenAnyTag = re.compile('^<([a-z_="]+)>$')
reCloseTag = re.compile('^</([a-z_]+)>$')
xml_temp = '.*<{0}>([a-z ]+)</{0}>.*'
xml_temp2 = '.*<{0}>([a-z ]*?)</{0}>.*'
reCmdStart = re.compile('<command=\"([a-z_]+)\">')
reConfid = re.compile(r':[0-9]\.[0-9]*')  # escape the dot so only decimal confidences match
NO_CALLSIGN = 'NO_CALLSIGN'
NO_AIRLINE = 'NO_AIRLINE_'
UNKNOWN_AIRLINE = 'UNKNOWN_AIRLINE_'
NO_FLIGHTNUMBER = '_NO_FLIGHTNUMBER'
NO_CONCEPT = 'NO_CONCEPT'
DEFAULT_SCORE = 1.0
NOISE_TOKENS = ['_spn_', '_nsn_']
PRECISION = 4
CONF_SEPARATOR = ':'
CONFMODE_OFF = 0
CONFMODE_MIN = 1
CONFMODE_PROD = 2
CONFMODE_ARITMEAN = 3
CONFMODE_GEOMEAN = 4
CONFIDENCE_SUM_MODES = dict(off=CONFMODE_OFF,
min=CONFMODE_MIN,
prod=CONFMODE_PROD,
amean=CONFMODE_ARITMEAN,
gmean=CONFMODE_GEOMEAN)
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def reIsNotEmpty(re_groups, group_id=1):
if re_groups is not None and len(re_groups.group(group_id).strip()) > 0:
return True
else:
return False
def multiMatch(item, matchingFunctions, matchAll=False):
"""
Convenience function to evaluate an item with a series of matching functions
If matchAll is false, returns true if at least one matching function returns true.
If matchAll is true, returns true if all matching functions return true.
If matchingFunctions is an empty list, returns False.
"""
if len(matchingFunctions) == 0:
return False
matches = [matchFunc(item) for matchFunc in matchingFunctions]
if matchAll:
return all(matches)
else:
return True in matches
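# Hedged example (hypothetical): multiMatch('abc', [str.isalpha, str.isupper]) is True,
# while multiMatch('abc', [str.isalpha, str.isupper], matchAll=True) is False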
def getAbsolutePath():
# Ensure that script can be executed from anywhere (e.g. via python tools/GenerateConcept.py)
scriptpath = path.dirname(sys.argv[0])
return path.abspath(scriptpath)
def parseConfidenceMode(confidenceKey):
return CONFIDENCE_SUM_MODES.get(confidenceKey, CONFMODE_OFF)
class TagFrame(object):
"""
A token sequence surrounded by an XML tag.
Each token consists of a word/tag string and a confidence value.
"""
DEFAULT_CONFIDENCE = DEFAULT_SCORE
def __init__(self, tokenPairs, isStrict=False):
# Validate
if isStrict:
# Validate length of list
if len(tokenPairs) < 2:
raise ValueError("token pair list too short (len={0}): {1}".format(len(tokenPairs), tokenPairs))
# Validate tokenPairs is a pair list
if len(tokenPairs[0]) != 2:
raise ValueError("Invalid format for token pairs: {0}".format(tokenPairs))
# Validate a tag is surrounding the sentence
firstToken = tokenPairs[0][0]
lastToken = tokenPairs[-1][0]
matchTag = reOpenTag.match(firstToken)
matchCmd = reCmdStart.match(firstToken)
# More validation
if isStrict:
if not (matchCmd and lastToken == '</command>') and not (
matchTag and lastToken == '</{0}>'.format(matchTag.group(1))):
raise ValueError(
"First/Last item must be matching opening/closing xml tags: {0} ... {1}".format(firstToken,
lastToken))
self.tokenPairs = tokenPairs
self.isStrict = isStrict
def __str__(self):
return 'TagFrame({0})'.format(self.tokenPairs)
@classmethod
def loadFromMBR(cls, inputFile, isStrict=False):
# mdr input (one token per line, multiple token features)
with open(inputFile) as f:
tokenPairs = list()
for line in f:
items = line.strip().lower().split()
token = items[-2]
confidence = float(items[-1])
tokenPairs.append((token, confidence))
tokenPairs = cls.repairTagStructure(tokenPairs)
return TagFrame(tokenPairs, isStrict)
@classmethod
def loadFromString(cls, string, isStrict=False):
if string is None:
return None
string = str(string) # Ensure we are dealing with str, not unicode object
string = string.strip()
string = string.replace('>', '> ') # Workaround for faulty spacing around tags
string = string.replace('<', ' <')
string = string.replace('  ', ' ')  # collapse the double spaces introduced by the tag padding above
stringtmp = ''
for s in string.split():
reSearch = re.search(reConfid, s)
if reSearch is not None:
stringtmp = stringtmp + ' ' + s.replace(reSearch.group(0), ' ' + reSearch.group(0) + ' ')
else:
stringtmp = stringtmp + ' ' + s
string = (' '.join(stringtmp.split())).replace(' :', ':')
string = string.strip().lower()
if len(string) == 0:
return None
else:
tokenPairs = list()
for word in string.split():
confidence = cls.DEFAULT_CONFIDENCE
if CONF_SEPARATOR in word:
word, confidence = word.split(CONF_SEPARATOR, 1)
confidence = float(confidence)
tokenPairs.append((word, confidence))
tokenPairs = cls.repairTagStructure(tokenPairs)
return TagFrame(tokenPairs, isStrict)
@classmethod
def repairTagStructure(cls, tokenPairs):
"""
Fixes a sentence so that all XML tags that were opened are also closed at the end.
Input is a list of token pairs (i.e. (word,confidence))
"""
repairedTokenPairs = list()
openTags = list()
for tokenPair in tokenPairs:
word = tokenPair[0]
if reOpenAnyTag.match(word):
# The only tags surrounding a command should be <s> and <commands>
# Close all other tags when a new command starts
if word.startswith('<command='):
while len(openTags) > 0:
if openTags[-1] in ['<s>', '<commands>']:
break
else:
openTag = openTags.pop()
cls._appendClosingPair_(repairedTokenPairs, openTag)
# Add the opened tag
openTags.append(word)
repairedTokenPairs.append(tokenPair)
elif reCloseTag.match(word):
matchFound = False
# Close all tags up until and including the current one
while not matchFound and len(openTags) > 0:
openTag = openTags.pop()
cls._appendClosingPair_(repairedTokenPairs, openTag)
closeTag = repairedTokenPairs[-1][0]
if word == closeTag:
matchFound = True
# In case there was no open tag matching the current closing tag
# Put in a new opening tag and close it immediately.
# This should make bug hunting easier than just not printing it.
if not matchFound:
openPair = ('<' + word[2:], cls.DEFAULT_CONFIDENCE)
repairedTokenPairs.append(openPair)
repairedTokenPairs.append(tokenPair) # This is the closing tag
else: # Regular word
repairedTokenPairs.append(tokenPair)
# If there are any tags left open, close them at the end of the utterance.
while len(openTags) > 0:
openTag = openTags.pop()
cls._appendClosingPair_(repairedTokenPairs, openTag)
return repairedTokenPairs
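# Hedged example (made-up tokens): [('<callsign>', 1.0), ('lufthansa', 0.9)] is repaired to
# [('<callsign>', 1.0), ('lufthansa', 0.9), ('</callsign>', 1.0)], i.e. the dangling tag is closed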
@staticmethod
def getClosingTag(openingTag):
openRE = reOpenAnyTag.match(openingTag)
if openRE:
name = openRE.group(1)
if name.startswith('command='): # The ending tag for commands is just </command>
name = 'command'
closingTag = '</{0}>'.format(name)
else:
closingTag = None
return closingTag
@classmethod
def _appendClosingPair_(cls, tokenPairs, openTag):
"""
Convenience method that takes an opening tag,
calculates its closing tag, turns it into a (tag,confidence) pair
and appends it to the given token pair list.
"""
closeTag = cls.getClosingTag(openTag)
closePair = (closeTag, cls.DEFAULT_CONFIDENCE)
tokenPairs.append(closePair)
def __len__(self):
return len(self.tokenPairs)
def isEmptyTag(self):
return len(self) <= 2
def getFramePairs(self, contentOnly=False):
if contentOnly:
return self.tokenPairs[1:-1]
else:
return self.tokenPairs
def getSplitPairs(self, contentOnly=False):
tokenPairs = self.getFramePairs(contentOnly)
tokens = list()
confidences = list()
for token, confidence in tokenPairs:
tokens.append(token)
confidences.append(confidence)
return tokens, confidences
def getTokens(self, contentOnly=False):
tokenPairs = self.getFramePairs(contentOnly)
return [token for token, _ in tokenPairs]
def getConfidenceValues(self, contentOnly=False):
tokenPairs = self.getFramePairs(contentOnly)
return [confidence for _, confidence in tokenPairs]
def getFramePair(self, i):
return self.tokenPairs[i]
def getToken(self, i):
return self.tokenPairs[i][0]
def getConfidenceValue(self, i):
return self.tokenPairs[i][1]
def containsTerm(self, term, termIsSet=False):
if termIsSet:
terms = term
else:
terms = [term]
for token in self.getTokens():
if token in terms:
return True
return False
def toString(self, contentOnly=False):
return ' '.join(self.getTokens(contentOnly=contentOnly))
@staticmethod
def _splitByMatch_(tokenPairs, matchFunc, n=0, assignMatchToHead=False):
"""
Splits a list of tuples according to a matching function.
The matching function is applied to the nth item of the tuple (default 0).
Returns two lists. The second list starts with the matched item.
If no match was found, returns the full list as first element and None as second element.
"""
for i in range(len(tokenPairs)):
token = tokenPairs[i][n]
if matchFunc(token):
if assignMatchToHead:
head = tokenPairs[:i + 1]
tail = tokenPairs[i + 1:]
else:
head = tokenPairs[:i]
tail = tokenPairs[i:]
return head, tail
# In case no item matched
return tokenPairs, None
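# Hedged example (hypothetical pairs): splitting [('a', 1.0), ('<x>', 1.0), ('b', 1.0)] with
# matchFunc = reOpenTag.match yields head [('a', 1.0)] and tail [('<x>', 1.0), ('b', 1.0)]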
def _extractSubframe_(self, startMatch, endMatch, n=0):
head, rest = TagFrame._splitByMatch_(self.tokenPairs, startMatch, n=n, assignMatchToHead=False)
if rest is None:
return None, self
body, tail = TagFrame._splitByMatch_(rest, endMatch, n=n, assignMatchToHead=True)
extractedFrame = TagFrame(body, self.isStrict)
outerFrame = TagFrame(head + tail, self.isStrict)
return extractedFrame, outerFrame
def extractTag(self, tag, is_command=False):
"""
Extracts a tag frame from a parent tag frame.
tag is the name of the tag to be extracted.
If is_command is true, tag is used as <command="{tag}"> rather than as the tag itself.
Returns a tuple (extractedFrame, outerFrame), where the former is the
extracted tag frame and the latter is the remainder of the parent frame
without the extracted bit.
"""
if is_command:
startTag = '<command="{0}">'.format(tag)
endTag = '</command>'
else:
startTag = '<{0}>'.format(tag)
endTag = '</{0}>'.format(tag)
return self._extractSubframe_(startTag.__eq__, endTag.__eq__, n=0)
def extractCommand(self):
"""
Extract the first command found inside the frame.
Returns a tuple (extractedFrame, outerFrame), where the former is the
extracted command frame and the latter is the remainder of the parent frame
without the extracted bit.
"""
return self._extractSubframe_(reCmdStart.match, '</command>'.__eq__, n=0)
def extractNoise(self, startMatches=None, endMatches=None, contentOnly=False):
"""
Extracts all noise tokens (and their confidences) from a frame.
To match TagFrame format, the tokens are surrounded by artificial <noise> tags.
startMatches is a list of truth functions.
Noise is only extracted after one of the functions has returned true for a token.
If startMatches is None, search starts immediately from the start.
endMatches is a list of truth functions.
Noise is only extracted until one of the functions has returned true for a token.
If endMatches is None, search continues until the end of the frame.
"""
noiseSequence = [('<noise>', 1.0)]
if startMatches is None:
hasStarted = True # If no start string is given, start immediately
1, Ns))
write_eq(Symbol(r'{C_{p,k}}^{\circ}'), cp[k])
write_eq(cp[k], cpfunc, sympy=True)
write_eq(cp[k], expand(cpfunc))
write_eq(diff(cp[k], T), simplify(diff(cpfunc, T)))
dcpdT = R * \
(a[k, 1] + T * (2 * a[k, 2] + T * (3 * a[k, 3] + 4 * a[k, 4] * T)))
dcpdT = assert_subs(diff(cpfunc, T), (
diff(cpfunc, T), dcpdT
))
write_eq(diff(cp[k], T), dcpdT, sympy=True)
write_eq(cp_tot_sym, cp_tot)
cvfunc = simplify(cpfunc - R)
cv = MyIndexedFunc(r'{C_v}', T)
cv_tot_sym = MyImplicitSymbol(r'\bar{c_v}', T)
cv_tot = Sum(nk[k] / n_sym * cv[k], (k, 1, Ns))
write_eq(Symbol(r'{C_{v,k}}^{\circ}'), cv[k])
write_eq(cv[k], cvfunc, sympy=True)
write_eq(cv[k], expand(cvfunc))
write_eq(diff(cv[k], T), simplify(diff(cvfunc, T)))
dcvdT = assert_subs(diff(cvfunc, T), (
diff(cvfunc, T), R * (a[k, 1] + T * (
2 * a[k, 2] + T * (3 * a[k, 3] + T * 4 * a[k, 4])))
))
write_eq(diff(cv[k], T), dcvdT, sympy=True)
write_eq(cv_tot_sym, cv_tot)
hfunc = R * (T * (a[k, 0] + T * (a[k, 1] * Rational(1, 2) + T * (
a[k, 2] * Rational(1, 3) + T * (
a[k, 3] * Rational(1, 4) + a[k, 4] * T * Rational(1, 5))
))) + a[k, 5])
# check that the dH/dT = cp identity holds
write_eq(Symbol(r'H_k^{\circ}'), h[k])
write_eq(h[k], hfunc, sympy=True, register=True)
write_eq(h[k], expand(hfunc))
dhdT = simplify(diff(hfunc, T))
dhdT = assert_subs(dhdT, (
dhdT, R * (a[k, 0] + T * (a[k, 1] + T * (
a[k, 2] + T * (a[k, 3] + T * a[k, 4]))))))
write_eq(diff(h[k], T), dhdT, sympy=True)
# and du/dT
write_dummy_eq(r'H_k = U_k + \frac{P V}{n}')
write_eq(u[k], h[k] - R * T)
ufunc = h[k] - R * T
ufunc = collect(assert_subs(ufunc, (h[k], hfunc)), R)
write_eq(u[k], ufunc, sympy=True)
dudT = diff(ufunc, T)
dudT = assert_subs(dudT, (
dudT, R * (-1 + a[k, 0] + T * (a[k, 1] + T * (
a[k, 2] + T * (a[k, 3] + T * a[k, 4]))))))
write_eq(diff(u[k], T), dudT, sympy=True)
# finally do the entropy and B terms
Sfunc = R * (a[k, 0] * log(T) + T * (a[k, 1] + T * (a[k, 2] * Rational(1, 2) +
T * (a[k, 3] * Rational(1, 3) + a[k, 4] * T * Rational(1, 4)))) + a[k, 6])
s = MyIndexedFunc(r'S', T)
write_eq(Eq(Symbol(r'S_k^{\circ}'), s[k]), Sfunc)
Jac = MyIndexedBase(r'\mathcal{J}', (Ns - 1, Ns - 1))
# reaction rates
write_section('Definitions')
nu_f = MyIndexedBase(r'\nu^{\prime}')
nu_r = MyIndexedBase(r'\nu^{\prime\prime}')
nu = nu_r[k, i] - nu_f[k, i]
nu_sym = MyIndexedBase(r'\nu')
write_eq(nu_sym[k, i], nu)
q_sym = MyIndexedFunc('q', args=(nk, T, V, P))
omega_k = Sum(nu_sym[k, i] * q_sym[i], (i, 1, Nr))
omega_sym_q_k = omega_k
write_eq(wdot[k], omega_k, register=True)
Rop_sym = MyIndexedFunc('R', args=(nk, T, V, P))
ci = MyIndexedFunc('c', args=(nk, T, V, P))
q = Rop_sym[i] * ci[i]
write_eq(q_sym[i], q, register=True)
omega_k = assert_subs(omega_k, (q_sym[i], q))
write_eq(wdot[k], omega_k, sympy=True)
# arrhenius coeffs
A = MyIndexedBase(r'A')
Beta = MyIndexedBase(r'\beta')
Ea = MyIndexedBase(r'{E_{a}}')
write_section('Rate of Progress')
Ropf_sym = MyIndexedFunc(r'{R_f}', args=(nk, T, V, P))
Ropr_sym = MyIndexedFunc(r'{R_r}', args=(nk, T, V, P))
Rop = Ropf_sym[i] - Ropr_sym[i]
write_eq(Rop_sym[i], Ropf_sym[i] - Ropr_sym[i], sympy=True, register=True)
kf_sym = MyIndexedFunc(r'{k_f}', T)
Ropf = kf_sym[i] * Product(Ck[k]**nu_f[k, i], (k, 1, Ns))
write_eq(Ropf_sym[i], Ropf, sympy=True, register=True)
kr_sym = MyIndexedFunc(r'{k_r}', T)
Ropr = kr_sym[i] * Product(Ck[k]**nu_r[k, i], (k, 1, Ns))
write_eq(Ropr_sym[i], Ropr, register=True)
write_section('Third-body effect')
# write the various ci forms
ci_elem = Integer(1)
write_conditional(
ci[i], ci_elem, r'\quad for elementary reactions', enum_conds=reaction_type.elementary)
ci_thd_sym = MyImplicitSymbol('[X]_i', args=(nk, T, V, P))
write_conditional(
ci[i], ci_thd_sym, r'\quad for third-body enhanced reactions', enum_conds=reaction_type.thd)
Pri_sym = MyImplicitSymbol('P_{r, i}', args=(nk, T, V, P))
Fi_sym = MyImplicitSymbol('F_{i}', args=(nk, T, V, P))
ci_fall = (Pri_sym / (1 + Pri_sym)) * Fi_sym
write_conditional(ci[i], ci_fall, r'\quad for unimolecular/recombination falloff reactions',
enum_conds=[reaction_type.fall])
ci_chem = (1 / (1 + Pri_sym)) * Fi_sym
write_conditional(ci[i], ci_chem, r'\quad for chemically-activated bimolecular reactions',
enum_conds=[reaction_type.chem])
write_section('Forward Reaction Rate')
kf = A[i] * (T**Beta[i]) * exp(-Ea[i] / (R * T))
write_eq(kf_sym[i], kf, register=True,
enum_conds=[reaction_type.elementary, reaction_type.thd, reaction_type.fall, reaction_type.chem])
write_section('Equilibrium Constants')
Kp_sym = MyIndexedFunc(r'{K_p}', args=(T, a))
Kc_sym = MyIndexedFunc(r'{K_c}', args=(T))
write_eq(
Kc_sym[i], Kp_sym[i] * ((Patm / (R * T))**Sum(nu_sym[k, i], (k, 1, Ns))))
write_dummy_eq(latex(Kp_sym[i]) + ' = ' +
r'\text{exp}(\frac{\Delta S^{\circ}_k}{R_u} - \frac{\Delta H^{\circ}_k}{R_u T})')
write_dummy_eq(latex(Kp_sym[i]) + ' = ' +
r'\text{exp}\left(\sum_{k=1}^{N_s}\nu_{ki}\left(\frac{S^{\circ}_k}{R_u} - \frac{H^{\circ}_k}{R_u T}\right)\right)')
B_sym = MyIndexedFunc('B', T)
Kc = ((Patm / R)**Sum(nu_sym[k, i], (k, 1, Ns))) * \
exp(Sum(nu_sym[k, i] * B_sym[k], (k, 1, Ns)))
write_eq(Kc_sym[i], Kc, sympy=True, register=True)
write_dummy_eq(latex(
B_sym[k]) + r'= \frac{S^{\circ}_k}{R_u} - \frac{H^{\circ}_k}{R_u T} - ln(T)')
Bk = simplify(Sfunc / R - hfunc / (R * T) - log(T))
Bk_rep = a[k, 6] - a[k, 0] + (a[k, 0] - Integer(1))*log(T) +\
T * (a[k, 1] * Rational(1, 2) + T * (a[k, 2] * Rational(1, 6) + T *
(a[k, 3] * Rational(1, 12) + a[k, 4] * T * Rational(1, 20)))) - \
a[k, 5] / T
Bk = assert_subs(Bk, (Bk, Bk_rep))
write_eq(B_sym[k], Bk, register=True, sympy=True)
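# sanity check: S_k/R - H_k/(R T) - ln(T) with the NASA-7 fits above indeed collapses to Bk_rep
# (the T coefficient is a[k, 1] - a[k, 1]/2 = a[k, 1]/2, the T^2 coefficient
# a[k, 2]/2 - a[k, 2]/3 = a[k, 2]/6, and so on)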
write_section('Reverse Reaction Rate')
kr = kf / Kc
kr_sym = MyIndexedFunc(r'{k_r}', args=(T))
write_conditional(kr_sym[i], kf_sym[i] / Kc_sym[i], r'\quad if non-explicit',
enum_conds=reversible_type.non_explicit)
register_equal(kr_sym[i], kf_sym[i] / Kc_sym[i])
A_rexp = MyIndexedBase(r'{A_{r}}')
Beta_rexp = MyIndexedBase(r'{\beta_r}')
Ea_rexp = MyIndexedBase(r'{E_{a,r}}')
kr_rexp = A_rexp[i] * T**Beta_rexp[i] * exp(-Ea_rexp[i] / (R * T))
Ropr_rexp = kr_rexp * Product(Ck[k]**nu_r[k, i], (k, 1, Ns))
write_conditional(Ropr_sym[i], Ropr_rexp, r'\quad if explicit',
enum_conds=reversible_type.explicit)
write_section('Third-Body Efficiencies')
thd_bdy_eff = MyIndexedBase(r'\alpha')
ci_thd = Sum(thd_bdy_eff[k, i] * Ck[k], (k, 1, Ns))
write_eq(ci_thd_sym, ci_thd)
ci_thd = assert_subs(
ci_thd,
(Sum(thd_bdy_eff[k, i] * Ck[k], (k, 1, Ns)),
Sum((thd_bdy_eff[k, i] - 1) * Ck[k], (k, 1, Ns)) +
Sum(Ck[k], (k, 1, Ns))),
(Sum(Ck[k], (k, 1, Ns)),
Ctot_sym),
)
write_eq(ci_thd_sym, ci_thd)
ci_thd = assert_subs(ci_thd,
(Sum((thd_bdy_eff[k, i] - 1) * Ck[k], (k, 1, Ns)),
Sum((thd_bdy_eff[k, i] - 1) * Ck[k], (k, 1, Ns - 1)) + (thd_bdy_eff[Ns, i] - 1) * Ck[Ns]),
(Ck[Ns], Cns))
write_eq(ci_thd_sym, ci_thd)
ci_thd = assert_subs(ci_thd, (Ctot, Ctot_sym))
ci_thd = simplify(ci_thd)
write_conditional(ci_thd_sym, ci_thd, text=r'\quad for mixture as third-body',
enum_conds=thd_body_type.mix)
ci_thd_unity = assert_subs(ci_thd, (thd_bdy_eff[k, i], S.One),
(thd_bdy_eff[Ns, i], S.One),
assumptions=[(thd_bdy_eff[k, i], S.One),
(thd_bdy_eff[Ns, i], S.One)])
ci_thd_unity = simplify(ci_thd_unity)
write_conditional(ci_thd_sym, ci_thd_unity, text=r'\quad for all $\alpha_{ki} = 1$',
enum_conds=thd_body_type.unity)
ci_thd_species = KroneckerDelta(Ns, m) * Cns + (
1 - KroneckerDelta(Ns, m)) * Ck[m]
ci_thd_species = assert_subs(ci_thd_species, (
Ctot, Ctot_sym))
write_conditional(ci_thd_sym, ci_thd_species, text=r'\quad for a single species third-body',
enum_conds=thd_body_type.species)
write_section('Falloff Reactions')
k0 = Symbol('A_0') * T**Symbol(r'\beta_0') * \
exp(-Symbol('E_{a, 0}') / (R * T))
kinf = Symbol(r'A_{\infty}') * T**Symbol(r'\beta_{\infty}') * \
exp(-Symbol(r'E_{a, \infty}') / (R * T))
k0_sym = MyImplicitSymbol(r'k_{0, i}', T)
write_eq(k0_sym, k0, sympy=True, register=True)
kinf_sym = MyImplicitSymbol(r'k_{\infty, i}', T)
write_eq(kinf_sym, kinf, sympy=True, register=True)
Pri_mix = ci_thd_sym * k0_sym / kinf_sym
write_conditional(Pri_sym, Pri_mix, text=r'\quad for the mixture as the third-body',
enum_conds=[thd_body_type.mix])
Pri_spec = ci_thd_species * k0_sym / kinf_sym
write_conditional(Pri_sym, Pri_spec, text=r'\quad for species $m$ as the third-body',
enum_conds=[thd_body_type.species])
Pri_unity = ci_thd_unity * k0_sym / kinf_sym
write_conditional(Pri_sym, Pri_unity, text=r'\quad for all $\alpha_{i, j} = 1$',
enum_conds=[thd_body_type.unity])
Fi_lind = Integer(1)
write_conditional(Fi_sym, Fi_lind, text=r'\quad for Lindemann',
enum_conds=[reaction_type.fall, reaction_type.chem, falloff_form.lind])
Fcent_sym = MyImplicitSymbol('F_{cent}', T)
Atroe_sym = MyImplicitSymbol('A_{Troe}', args=(Pri_sym, Fcent_sym))
Btroe_sym = MyImplicitSymbol('B_{Troe}', args=(Pri_sym, Fcent_sym))
Fcent_power = (1 + (Atroe_sym / Btroe_sym)**2)**-1
Fi_troe = Fcent_sym**Fcent_power
Fi_troe_sym = ImplicitSymbol('F_{i}', args=(Fcent_sym, Pri_sym))
register_equal(Fi_troe_sym, Fi_troe)
write_conditional(Fi_sym, Fi_troe, text=r'\quad for Troe',
enum_conds=[reaction_type.fall, reaction_type.chem, falloff_form.troe])
X_sym = MyImplicitSymbol('X', Pri_sym)
a_fall, b_fall, c_fall, d_fall, e_fall, \
Tstar, Tstarstar, Tstarstarstar = symbols(
'a b c d e T^{*} T^{**} T^{***}')
Fi_sri = d_fall * T ** e_fall * (
a_fall * exp(-b_fall / T) + exp(-T / c_fall))**X_sym
write_conditional(Fi_sym, Fi_sri, text=r'\quad for SRI',
enum_conds=[reaction_type.fall, reaction_type.chem, falloff_form.sri])
Fcent = (S.One - a_fall) * exp(-T / Tstarstarstar) + a_fall * exp(-T / Tstar) + \
exp(-Tstarstar / T)
write_eq(Fcent_sym, Fcent, register=True, sympy=True)
Atroe = log(Pri_sym, 10) - Float(0.67) * log(Fcent_sym, 10) - Float(0.4)
write_eq(Atroe_sym, Atroe, register=True, sympy=True)
Btroe = Float(0.806) - Float(1.1762) * log(Fcent_sym, 10) - \
Float(0.14) * log(Pri_sym, 10)
write_eq(Btroe_sym, Btroe, register=True, sympy=True)
X = (1 + (log(Pri_sym, 10))**2)**-1
write_eq(X_sym, X, register=True, sympy=True)
write_section('Pressure-Dependent Reactions')
# pdep
latexfile.write('For PLog reactions\n')
A_1, A_2, beta_1, beta_2, Ea_1, Ea_2 = symbols(r'A_1 A_2 \beta_1' +
r' \beta_2 E_{a_1} E_{a_2}')
k1 = A_1 * T**beta_1 * exp(Ea_1
# -*- coding:utf-8 -*-
import os
import stat
import copy
import re
import shutil
import zipfile
from os.path import join, getsize
from flask import current_app, session
from openpyxl import Workbook, load_workbook
from robot.api import TestData
from robot.parsing.model import Step
from utils.file import get_projectdirfromkey, remove_dir
from utils.mylogger import getlogger
log = getlogger("TestCaseUnite")
def getCaseContent(cpath, cname):
'''Write-back: fetch the test case content when writing automation results back'''
if not os.path.exists(cpath):
return "Can not find case file:"+cpath
content = ''
suite = TestData(source=cpath)
for t in suite.testcase_table.tests:
if t.name == cname:
isHand = False
if t.tags.value and 'Hand' in t.tags.value:
isHand = True
for s in t.steps:
ststr = (' ' * 4).join(s.as_list())
if ststr.strip() == 'No Operation':
continue
if isHand:
if ststr.strip().startswith('#*'):
ststr = ststr.replace('#*', '')
content += ststr + '\r\n'
return content
def export_casezip(key, exp_filedir=''):
dir = exp_filedir
if dir == '':
dir = get_projectdirfromkey(key) + '/runtime'
zip_name = os.path.basename(key) + '.zip'
zip_path = os.path.join(dir, zip_name)
try:
z = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(key):
fpath = dirpath.replace(key, '')
fpath = fpath and fpath + os.sep or ''
for filename in filenames:
z.write(os.path.join(dirpath, filename), fpath + filename)
z.close()
except Exception as e:
log.error("下载zip用例异常:{}".format(e))
return (False, "{}".format(e))
return (True, zip_path)
def export_casexlsx(key, db, exp_filedir=''):
export_dir = key
if not os.path.isdir(export_dir):
log.error("不支持导出一个文件中的用例:"+export_dir)
return (False, "不支持导出一个文件中的用例:"+export_dir)
basename = os.path.basename(export_dir)
dir = exp_filedir
if dir == '':
dir = get_projectdirfromkey(export_dir) + '/runtime'
os.mkdir(dir) if not os.path.exists(dir) else None
export_file = os.path.join(dir, basename+'.xlsx')
db.refresh_caseinfo(export_dir, "Force")
cases = []
sql = "SELECT info_key,info_name,info_doc,info_tags FROM testcase WHERE info_key like '{}%' ;".format(
key)
res = db.runsql(sql)
for i in res:
(info_key, info_name, info_doc, info_tag) = i
cases.append([info_key, info_name, info_doc, info_tag])
wb = Workbook()
ws = wb.active
ws.append(["导出&导入用例:"])
ws.append(["'_'用在文件名前后,表示用例文件:如 '_用例文件名_' 表示'用例文件名.robot'"])
ws.append(["'-'用来连接目录,没有此符号表示没有子目录:如 '目录1-目录11' 表示 '目录1/目录11'"])
ws.append(["每个sheet的第一列,是后面用例所在的用例文件名(.robot)"])
ws.append(["... ..."])
ws.append(["注意:通过xlsx文件导入用例,如果是自动化用例,且用例已经存在,则只更新doc和tag,不更新用例内容"])
ws.append(["... ..."])
ws.append(["此'sheet'页面,不会被导入"])
ws.append(["... ..."])
ws.append(["Export&Import Cases:"])
ws.append(
["'_'after the file name,Means a suite:'_SuiteName_' means 'SuiteName.robot'"])
ws.append(
["'-'concat the dirs,no this sign no subdir:'dir1-dir11' means 'dir1/dir11'"])
ws.append(
["First Column of each sheet,is the suite name of the case in this line(.robot)"])
ws.append(["... ..."])
ws.append(["Caution:Import cases from xlsx file,if it is Auto-case and it exists,then update doc and tag Only,Do not update Case content."])
ws.append(["... ..."])
ws.append(["This 'sheet' ,Wont be imported."])
for c in cases:
if not os.path.exists(c[0]):
continue
casecontent = getCaseContent(c[0], c[1])
suitename = os.path.basename(c[0])
tags = c[3].split(',')
tags.remove('${EMPTY}') if '${EMPTY}' in tags else None
category = "Auto"
if "HAND" in tags or "Hand" in tags or 'hand' in tags:
category = "Hand"
casecontent = casecontent.replace(' '*4 + '#', '')
sheetname = _get_ws(export_dir, c[0])
#print("Get sheete name :"+sheetname)
# print(suitename,c[1],c[2],casecontent,c[3],category)
if not sheetname in wb.sheetnames:
ws = wb.create_sheet(sheetname)
#ws = wb.active
ws.append(["Suite_Name", "Case_Name", "Case_Doc",
"Case_Content", "Case_Tag", "Case_Type"])
else:
ws = wb[sheetname]
#ws = wb.active
ws.append([suitename, c[1], c[2], casecontent, c[3], category])
os.remove(export_file) if os.path.exists(export_file) else None
wb.save(export_file)
log.info("生成测试用例文件 {} 到目录 {}".format(export_dir, export_file))
return (True, export_file)
def _get_ws(export_dir, suite_key):
"""
return worksheet name
suite_key= /xxx/project/TestCase/v50/1dir1/test1.robot
expor_dir= /xxx/project/TestCase
"""
suite_name = os.path.basename(suite_key) # test1.robot
suite_dir = os.path.dirname(suite_key) # /xxx/project/TestCase/v50/1dir1
subdir = suite_dir.split(export_dir)[1] # /v50/1dir1
subdir = subdir.replace('/', '-') # -v50-1dir1
subdir = subdir[1:] # v50-1dir1
if subdir == '':
single_suite = suite_name.split(".")[0]
return "_"+single_suite+"_"
return subdir
def do_importfromzip(temp_file, path):
zip_file = temp_file
try:
if not os.path.exists(zip_file):
return ('fail', 'Can not find xlsx file :{}'.format(zip_file))
if not os.path.isdir(path):
return ('fail', 'The Node is NOT A DIR :{}'.format(path))
if not zipfile.is_zipfile(zip_file):
return ('fail', 'The file is not a zip file :{}'.format(os.path.basename(zip_file)))
remove_dir(path) if os.path.exists(path) else None
os.mkdir(path)
fz = zipfile.ZipFile(zip_file, 'r')
for file in fz.namelist():
fz.extract(file, path)
return ('success', path)
except Exception as e:
log.error("从zip文件导入发生异常:{}".format(e))
return ("fail", "Exception occured .")
def do_unzip_project(temp_file, path):
zip_file = temp_file
try:
if not os.path.exists(zip_file):
return ('fail', 'Cannot find zip file: {}'.format(zip_file))
app = current_app._get_current_object()
if not zipfile.is_zipfile(zip_file):
return ('fail', 'Not a zip file: {}'.format(os.path.basename(zip_file)))
remove_dir(path) if os.path.exists(path) else None
os.mkdir(path)
fz = zipfile.ZipFile(zip_file, 'r')
for file in fz.namelist():
fz.extract(file, path)
projectfile = ''
project_content = ''
for p in os.listdir(path):
if os.path.exists(os.path.join(path, p, 'platforminterface/project.conf')):
projectfile = os.path.join(
path, p, 'platforminterface/project.conf')
project_content = os.path.join(path, p)
if not projectfile:
msg = "Load Project Fail: 找不到 project.conf:{} ".format(projectfile)
log.error(msg)
return ('fail', msg)
log.info("读取 Project file: {}".format(projectfile))
with open(projectfile, 'r') as f:
for l in f:
if l.startswith('#'):
continue
if len(l.strip()) == 0:
continue
splits = l.strip().split('|')
if len(splits) != 4:
log.error("错误的 project.conf 行 " + l)
return ('fail', "错误的 project.conf 行 " + l)
(projectname, owner, users, cron) = splits
project_path = os.path.join(
app.config['AUTO_HOME'], 'workspace', owner, projectname)
if os.path.exists(project_path):
                    msg = 'Target directory already exists: {}'.format(project_path)
log.error(msg)
return ('fail', msg)
log.info("复制文件从 {} 到 {} ".format(
project_content, project_path))
try:
shutil.copytree(project_content, project_path)
except Exception as e:
return ('fail', "{}".format(e))
return ('success', project_path)
except Exception as e:
log.error("从zip文件导入发生异常:{}".format(e))
return ("fail", "Exception occured .")
def do_uploadcaserecord(temp_file):
if not os.path.exists(temp_file):
return ('fail', 'Can not find file :{}'.format(temp_file))
app = current_app._get_current_object()
total = 0
success = 0
formaterror = 0
exits = 0
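    # Added note: the upload file is expected to contain one record per line with
    # exactly 8 '|'-separated fields, in this order:
    #   info_key|info_name|info_testproject|info_projectversion|ontime|run_status|run_elapsedtime|run_user
    # A hypothetical line (values are made up) would look like:
    #   proj-v50-suite1_case1|case1|projA|v50|2020-01-01 10:00:00|PASS|12|tester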
with open(temp_file, 'r') as f:
for l in f:
l = l.strip()
if len(l) != 0:
total += 1
else:
continue
splits = l.split('|')
if len(splits) != 8:
formaterror += 1
log.error("uploadcaserecord 错误到列:"+l)
continue
(info_key, info_name, info_testproject, info_projectversion,
ontime, run_status, run_elapsedtime, run_user) = splits
sql = ''' INSERT into caserecord (info_key,info_name,info_testproject,info_projectversion,ontime,run_status,run_elapsedtime,run_user)
VALUES ('{}','{}','{}','{}','{}','{}','{}','{}');
'''.format(info_key, info_name, info_testproject, info_projectversion, ontime, run_status, run_elapsedtime, run_user)
res = app.config['DB'].runsql(sql)
if res:
success += 1
else:
exits += 1
log.error("uploadcaserecord 记录存在:"+l)
    return ('success', 'Finished with total:{}, success:{}, error:{}, exists:{}'.format(total, success, formaterror, exits))
def do_importfromxlsx(temp_file, path):
xls_file = temp_file
dest_dir = path
if not os.path.isdir(dest_dir):
return ('fail', 'The Node is NOT A DIR :{}'.format(dest_dir))
if not os.path.exists(xls_file):
return ('fail', 'Can not find xlsx file :{}'.format(xls_file))
xls_name = os.path.basename(xls_file).split('.')[0]
dir_name = os.path.basename(dest_dir)
    if xls_name != dir_name:
        return ('fail', 'Filename {} does not match the directory name: {}'.format(xls_name, dest_dir))
try:
wb = load_workbook(xls_file)
update_cases = 0
unupdate_case = 0
failedlist = []
for stn in wb.sheetnames[1:]:
ws = wb[stn]
            if ws['A1'].value != 'Suite_Name':
                return ('fail', 'sheet:{} A1:{} Expect:Suite_Name'.format(stn, ws['A1'].value))
            if ws['B1'].value != 'Case_Name':
                return ('fail', 'sheet:{} B1:{} Expect:Case_Name'.format(stn, ws['B1'].value))
            if ws['C1'].value != 'Case_Doc':
                return ('fail', 'sheet:{} C1:{} Expect:Case_Doc'.format(stn, ws['C1'].value))
            if ws['D1'].value != 'Case_Content':
                return ('fail', 'sheet:{} D1:{} Expect:Case_Content'.format(stn, ws['D1'].value))
            if ws['E1'].value != 'Case_Tag':
                return ('fail', 'sheet:{} E1:{} Expect:Case_Tag'.format(stn, ws['E1'].value))
            if ws['F1'].value != 'Case_Type':
                return ('fail', 'sheet:{} F1:{} Expect:Case_Type'.format(stn, ws['F1'].value))
for r in ws.rows:
(a, b, c, d, e, f) = r
if a.value == 'Suite_Name': # omit the 1st line
continue
fields = [a.value if a.value else '',
b.value if b.value else '',
c.value if c.value else '',
d.value if d.value else '',
e.value if e.value else '',
f.value if f.value else ''
]
(done, msg) = _update_onecase(dest_dir, stn, fields)
if done:
update_cases += 1
else:
unupdate_case += 1
failedlist.append(
"sheet:{} suite:{} case:{} ->{}".format(stn, a.value, b.value, msg))
        return ('success', 'S:{},F:{},FailList:{}'.format(update_cases, unupdate_case, '\n'.join(failedlist)))
except Exception as e:
log.error("do_uploadcase 异常:{}".format(e))
return ('fail', 'Deal with xlsx file fail :{}'.format(xls_file))
def _update_onecase(dest_dir, sheetname, fields):
stn = sheetname
DONE = False
robotname = fields[0].split('.')[0]
if stn.startswith('_') and stn.endswith('_'):
        if '_' + robotname + '_' != stn:
            return (False, "Sheet name should match the first column (ignoring the extension): {} vs {}".format(stn, robotname))
robotfile = os.path.join(dest_dir, robotname+'.robot')
else:
subdir = stn.replace('-', '/')
robotfile = os.path.join(dest_dir, subdir, robotname+'.robot')
file_dir = os.path.dirname(robotfile)
os.makedirs(file_dir, exist_ok=True)
log.info("Updating robotfile:{} with args:{}".format(robotfile, fields))
isHand = False
    if fields[5] in ('手工', 'HAND', 'Hand', 'hand'):
isHand = True
brandnew = "*** Settings ***\n" + \
"*** Variables ***\n" + \
"*** Test Cases ***\n" + \
"NewTestCase\n" + \
" [Documentation] This is Doc \n" + \
" [Tags] tag1 tag2\n" + \
" Log This is a Brandnew case.\n"
name = fields[1].strip()
doc = fields[2].strip()
content = fields[3].strip()
    tags = fields[4].strip().replace('，', ',').split(',')  # normalize full-width (Chinese) commas to ASCII before splitting
if isHand:
tags.append('Hand')
tags = list(set(tags))
    space_splitter = re.compile(u'[ \t\xa0]{2,}|\t+')  # Robot Framework cell separator: 2+ spaces (incl. NBSP) or tabs
try:
        # If the suite file does not exist, create it with a skeleton test case first
        if not os.path.exists(robotfile):
            log.info("Test case file does not exist, creating: " + robotfile)
with open(robotfile, 'w') as f:
f.write(brandnew)
suite = TestData(source=robotfile)
t = suite.testcase_table.tests[0]
t.name = name
t.tags.value = tags
t.doc.value = doc.replace('\n', '\\n')
steps = []
if isHand:
lines = content.split('\n')
for l in lines:
step = Step([], comment="#*"+l.strip())
steps.append(step)
steps.append(Step(["No | |
<reponame>abrikoseg/batchflow
""" Progress notifier. """
import sys
import math
from time import time, gmtime, strftime
from tqdm import tqdm
from tqdm.notebook import tqdm as tqdm_notebook
from tqdm.autonotebook import tqdm as tqdm_auto
import numpy as np
import matplotlib.pyplot as plt
try:
from IPython import display
except ImportError:
pass
from .named_expr import NamedExpression, eval_expr
from .monitor import ResourceMonitor, MONITOR_ALIASES
from .utils_telegram import TelegramMessage
class DummyBar:
""" Progress tracker without visual representation. """
#pylint: disable=invalid-name
def __init__(self, total, *args, **kwargs):
self.total = total
self.args, self.kwargs = args, kwargs
self.n = 0
self.desc = ''
self.postfix = ''
self.start_t = time()
def update(self, n):
self.n += n
@property
def format_dict(self):
return {'n': self.n, 'total': self.total, 't': time() - self.start_t}
def format_meter(self, n, total, t, **kwargs):
_ = kwargs
return f'{n}/{total} iterations done; elapsed time is {t:3.3} seconds'
def display(self, *args, **kwargs):
_ = args, kwargs
def set_description(self, desc):
self.desc = desc
def set_postfix_str(self, postfix):
self.postfix = postfix
def close(self):
pass
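# Added note: DummyBar mirrors just the subset of the tqdm interface that Notifier
# relies on (update, set_description, set_postfix_str, display, close, format_meter,
# format_dict), so the rest of the code can treat "no visual progress bar" uniformly.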
class Notifier:
""" Progress tracker and a resource monitor tool in one.
Allows to dynamically track and display containers (pipeline variables, images, monitor),
log them to file in both textual and visual formats.
Instance can be used to wrap iterators or by calling :meth:`.update` manually.
Parameters
----------
    bar : {'n', 'a', 'j', 't', True} or callable
Sets the type of used progress bar:
- `callable` must provide a tqdm-like interface.
- `n` stands for notebook version of tqdm bar.
        - `a` stands for automatic choice of an appropriate tqdm bar.
        - `j` stands for graph drawing as a progress bar.
        - `t` or True stands for the standard text tqdm bar.
- otherwise, no progress bar will be displayed. Note that iterations,
as well as everything else (monitors, variables, logs) are still tracked.
update_total : bool
Whether the total amount of iterations should be computed at initialization.
desc : str
Prefix for created descriptions.
disable : bool
Whether to disable the notifier completely: progress bar, monitors and graphs.
total, batch_size, n_iters, n_epochs, drop_last, length
Parameters to calculate total amount of iterations.
frequency : int
Frequency of notifier updates.
monitors : str, :class:`.Monitor`, :class:`.NamedExpression`, dict or sequence of them
Set tracked ('monitored') entities: they are displayed in the bar description.
Strings are either registered monitor identifiers or names of pipeline variables.
Named expressions are evaluated with the pipeline.
If dict, then 'source' key should be one of the above to identify container.
Other available keys:
- 'name' is used to display at bar descriptions and plot titles
- 'plot_function' is used to display container data.
Can be used to change the default way of displaying graphs.
graphs : str, :class:`.Monitor`, :class:`.NamedExpression`, or sequence of them
Same semantics, as `monitors`, but tracked entities are displayed in dynamically updated plots.
log_file : str
If provided, a textual log is written into the supplied path.
telegram : bool
Whether to send notifications to a Telegram Bot. Works with both textual bars and figures (from `graphs`).
Under the hood, keeps track of two messages - one with text, one with media, and edits them when needed.
        The `silent` parameter controls whether messages are sent with notifications or not.
One must supply telegram `token` and `chat_id` either by passing directly or
setting environment variables `TELEGRAM_TOKEN` and `TELEGRAM_CHAT_ID`. To get them:
- create a bot <https://core.telegram.org/bots#6-botfather> and copy its `{token}`
- add the bot to a chat and send it a message such as `/start`
- go to <https://api.telegram.org/bot`{token}`/getUpdates> to find out the `{chat_id}`
window : int
Allows to plot only the last `window` values from every tracked container.
layout : str
If `h`, then subplots are drawn horizontally; vertically otherwise.
figsize : tuple of numbers
Total size of drawn figure.
savepath : str
Path to save image, created by tracking entities with `graphs`.
*args, **kwargs
Positional and keyword arguments that are used to create underlying progress bar.
"""
COLOUR_RUNNING = '#2196f3'
COLOUR_SUCCESS = '#4caf50'
COLOUR_FAILURE = '#f44336'
def __init__(self, bar=None, *args, update_total=True, disable=False,
total=None, batch_size=None, n_iters=None, n_epochs=None, drop_last=False, length=None,
frequency=1, monitors=None, graphs=None, log_file=None,
                 telegram=False, token=None, chat_id=None, silent=True,
window=None, layout='h', figsize=None, savepath=None, **kwargs):
# Prepare data containers like monitors and pipeline variables
if monitors:
monitors = monitors if isinstance(monitors, (tuple, list)) else [monitors]
else:
monitors = []
if graphs:
graphs = graphs if isinstance(graphs, (tuple, list)) else [graphs]
else:
graphs = []
self.has_monitors = False
self.has_graphs = len(graphs) > 0
self.n_monitors = len(monitors)
self.data_containers = []
for container in monitors + graphs:
if not isinstance(container, dict):
container = {'source': container}
if isinstance(container['source'], str) and container['source'].lower() in MONITOR_ALIASES:
container['source'] = MONITOR_ALIASES[container['source'].lower()]()
source = container.get('source')
if source is None:
raise ValueError('Passed dictionaries as `monitors` or `graphs` should contain `source` key!')
if isinstance(source, ResourceMonitor):
self.has_monitors = True
if 'name' not in container:
if isinstance(source, ResourceMonitor):
container['name'] = source.__class__.__name__
elif isinstance(source, NamedExpression):
container['name'] = source.name
elif isinstance(source, str):
container['name'] = source
else:
container['name'] = None
self.data_containers.append(container)
self.frequency = frequency
self.timestamps = []
self.start_monitors()
# Prepare file log
self.log_file = log_file
if self.log_file:
with open(self.log_file, 'w'):
pass
# Create bar; set the number of total iterations, if possible
self.bar = None
bar_func = None
if callable(bar):
bar_func = bar
elif bar in ['n', 'nb', 'notebook', 'j', 'jpn', 'jupyter']:
bar_func = tqdm_notebook
elif bar in [True, 'a', 'auto']:
bar_func = tqdm_auto
elif bar in ['t', 'tqdm']:
bar_func = tqdm
elif bar in ['telegram', 'tg']:
bar_func = tqdm_auto
telegram = True
elif bar in [False, None]:
bar_func = DummyBar
else:
raise ValueError('Unknown bar value:', bar)
# Set default values for bars
if bar_func is tqdm or bar_func is tqdm_notebook:
if bar_func is tqdm:
ncols = min(80 + 10 * self.n_monitors, 120)
colour = self.COLOUR_SUCCESS
elif bar_func is tqdm_notebook:
ncols = min(700 + 150 * self.n_monitors, 1000)
colour = None
kwargs = {
'ncols': ncols,
'colour': colour,
'file': sys.stdout,
**kwargs
}
self.bar_func = lambda total: bar_func(total=total, *args, **kwargs)
# Turn off everything if `disable`
self._disable = disable
if update_total:
self.update_total(total=total, batch_size=batch_size, n_iters=n_iters, n_epochs=n_epochs,
drop_last=drop_last, length=length)
# Prepare plot params
#pylint: disable=invalid-unary-operand-type
self.slice = slice(-window, None, None) if isinstance(window, int) else slice(None)
self.layout, self.figsize, self.savepath = layout, figsize, savepath
# Prepare Telegram notifications
self.telegram = telegram
if self.telegram:
self.telegram_text = TelegramMessage(token=token, chat_id=chat_id, silent=silent)
self.telegram_media = TelegramMessage(token=token, chat_id=chat_id, silent=silent)
def update_total(self, batch_size, n_iters, n_epochs, drop_last, length, total=None):
""" Re-calculate total number of iterations. """
if total is None:
if n_iters is not None:
total = n_iters
if n_epochs is not None:
if drop_last:
total = length // batch_size * n_epochs
else:
total = math.ceil(length * n_epochs / batch_size)
# Force close previous bar, create new
if self.bar is not None:
try:
# jupyter bar must be closed and reopened
self.bar.display(close=True)
self.bar = self.bar_func(total=total)
except TypeError:
# text bar can work with a simple reassigning of `total`
self.bar.total = total
else:
self.bar = self.bar_func(total=total)
if self._disable:
self.disable()
def disable(self):
""" Completely disable notifier: progress bar, monitors and graphs. """
if self.bar is not None:
try:
# jupyter bar must be closed and reopened
self.bar.display(close=True)
except TypeError:
pass
finally:
self.bar = DummyBar(total=self.total)
self.data_containers = []
self.has_graphs = False
self.log_file = None
self.telegram = False
def update(self, n=1, pipeline=None, batch=None):
""" Update Notifier with new info:
- fetch up-to-date data from batch, pipeline and monitors
- set bar postfix
- draw plots anew
        - update the log file
- send notifications to Telegram
- increment underlying progress bar tracker
"""
if self.bar.n == 0 or (self.bar.n + 1) % self.frequency == 0 or (self.bar.n == self.bar.total - 1):
self.timestamps.append(gmtime())
if self.data_containers:
self.update_data(pipeline=pipeline, batch=batch)
self.update_postfix()
if self.has_graphs:
self.update_plots(index=self.n_monitors, add_suptitle=True)
if self.log_file:
self.update_log_file()
if self.telegram:
self.update_telegram()
self.bar.update(n)
def update_data(self, pipeline=None, batch=None):
""" Get data from monitor or pipeline. """
for container in self.data_containers:
source = container['source']
if isinstance(source, ResourceMonitor):
source.fetch()
container['data'] = source.data
elif isinstance(source, str):
value = pipeline.v(source)
container['data'] = value
else:
value = eval_expr(source, pipeline=pipeline, batch=batch)
container['data'] = value
def update_postfix(self):
""" Set the new bar description, if needed. """
postfix = self.create_description(iteration=-1)
previous_postfix = | |
function]
cls.add_method('InitEndDeviceInfo',
'ns3::LoRaWANEndDeviceInfoNS',
[param('ns3::Ipv4Address', 'arg0')])
## lorawan-gateway-application.h (module 'lorawan'): void ns3::LoRaWANNetworkServer::PopulateEndDevices() [member function]
cls.add_method('PopulateEndDevices',
'void',
[])
## lorawan-gateway-application.h (module 'lorawan'): void ns3::LoRaWANNetworkServer::RW1TimerExpired(uint32_t deviceAddr) [member function]
cls.add_method('RW1TimerExpired',
'void',
[param('uint32_t', 'deviceAddr')])
## lorawan-gateway-application.h (module 'lorawan'): void ns3::LoRaWANNetworkServer::RW2TimerExpired(uint32_t deviceAddr) [member function]
cls.add_method('RW2TimerExpired',
'void',
[param('uint32_t', 'deviceAddr')])
## lorawan-gateway-application.h (module 'lorawan'): void ns3::LoRaWANNetworkServer::SendDSPacket(uint32_t deviceAddr, ns3::Ptr<ns3::LoRaWANGatewayApplication> gatewayPtr, bool RW1, bool RW2) [member function]
cls.add_method('SendDSPacket',
'void',
[param('uint32_t', 'deviceAddr'), param('ns3::Ptr< ns3::LoRaWANGatewayApplication >', 'gatewayPtr'), param('bool', 'RW1'), param('bool', 'RW2')])
## lorawan-gateway-application.h (module 'lorawan'): void ns3::LoRaWANNetworkServer::SetConfirmedDataDown(bool confirmedData) [member function]
cls.add_method('SetConfirmedDataDown',
'void',
[param('bool', 'confirmedData')])
## lorawan-gateway-application.h (module 'lorawan'): static void ns3::LoRaWANNetworkServer::clearLoRaWANNetworkServerPointer() [member function]
cls.add_method('clearLoRaWANNetworkServerPointer',
'void',
[],
is_static=True)
## lorawan-gateway-application.h (module 'lorawan'): static ns3::Ptr<ns3::LoRaWANNetworkServer> ns3::LoRaWANNetworkServer::getLoRaWANNetworkServerPointer() [member function]
cls.add_method('getLoRaWANNetworkServerPointer',
'ns3::Ptr< ns3::LoRaWANNetworkServer >',
[],
is_static=True)
## lorawan-gateway-application.h (module 'lorawan'): static bool ns3::LoRaWANNetworkServer::haveLoRaWANNetworkServerObject() [member function]
cls.add_method('haveLoRaWANNetworkServerObject',
'bool',
[],
is_static=True)
return
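# Added note: the register_* functions below follow the usual PyBindGen pattern of
# ns-3 Python bindings -- each cls.add_method() call declares one C++ member
# function (name, return type, parameter list) plus flags such as is_static,
# is_const, is_virtual or is_pure_virtual, so the generated Python API mirrors the
# C++ header named in the preceding '##' comment.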
def register_Ns3LoRaWANPhy_methods(root_module, cls):
## lorawan-phy.h (module 'lorawan'): static ns3::TypeId ns3::LoRaWANPhy::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## lorawan-phy.h (module 'lorawan'): ns3::LoRaWANPhy::LoRaWANPhy() [constructor]
cls.add_constructor([])
## lorawan-phy.h (module 'lorawan'): ns3::LoRaWANPhy::LoRaWANPhy(uint8_t arg0) [constructor]
cls.add_constructor([param('uint8_t', 'arg0')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::PrintCurrentTxConf() const [member function]
cls.add_method('PrintCurrentTxConf',
'void',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): bool ns3::LoRaWANPhy::SetTxConf(int8_t power, uint8_t channelIndex, uint8_t dataRateIndex, uint8_t codeRate, uint8_t preambleLength, bool implicitHeader, bool crcOn) [member function]
cls.add_method('SetTxConf',
'bool',
[param('int8_t', 'power'), param('uint8_t', 'channelIndex'), param('uint8_t', 'dataRateIndex'), param('uint8_t', 'codeRate'), param('uint8_t', 'preambleLength'), param('bool', 'implicitHeader'), param('bool', 'crcOn')])
## lorawan-phy.h (module 'lorawan'): uint8_t ns3::LoRaWANPhy::GetCurrentChannelIndex() const [member function]
cls.add_method('GetCurrentChannelIndex',
'uint8_t',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): uint8_t ns3::LoRaWANPhy::GetCurrentDataRateIndex() const [member function]
cls.add_method('GetCurrentDataRateIndex',
'uint8_t',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): ns3::Time ns3::LoRaWANPhy::CalculateTxTime(uint8_t payloadLength) [member function]
cls.add_method('CalculateTxTime',
'ns3::Time',
[param('uint8_t', 'payloadLength')])
## lorawan-phy.h (module 'lorawan'): ns3::Time ns3::LoRaWANPhy::CalculatePreambleTime() [member function]
cls.add_method('CalculatePreambleTime',
'ns3::Time',
[])
## lorawan-phy.h (module 'lorawan'): bool ns3::LoRaWANPhy::preambleDetected() const [member function]
cls.add_method('preambleDetected',
'bool',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetMobility(ns3::Ptr<ns3::MobilityModel> m) [member function]
cls.add_method('SetMobility',
'void',
[param('ns3::Ptr< ns3::MobilityModel >', 'm')],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::MobilityModel> ns3::LoRaWANPhy::GetMobility() [member function]
cls.add_method('GetMobility',
'ns3::Ptr< ns3::MobilityModel >',
[],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetChannel(ns3::Ptr<ns3::SpectrumChannel> c) [member function]
cls.add_method('SetChannel',
'void',
[param('ns3::Ptr< ns3::SpectrumChannel >', 'c')],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::SpectrumChannel> ns3::LoRaWANPhy::GetChannel() [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::SpectrumChannel >',
[])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetDevice(ns3::Ptr<ns3::NetDevice> d) [member function]
cls.add_method('SetDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'd')],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::NetDevice> ns3::LoRaWANPhy::GetDevice() [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetAntenna(ns3::Ptr<ns3::AntennaModel> a) [member function]
cls.add_method('SetAntenna',
'void',
[param('ns3::Ptr< ns3::AntennaModel >', 'a')])
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::AntennaModel> ns3::LoRaWANPhy::GetRxAntenna() [member function]
cls.add_method('GetRxAntenna',
'ns3::Ptr< ns3::AntennaModel >',
[],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::SpectrumModel const> ns3::LoRaWANPhy::GetRxSpectrumModel() const [member function]
cls.add_method('GetRxSpectrumModel',
'ns3::Ptr< ns3::SpectrumModel const >',
[],
is_const=True, is_virtual=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetTxPowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue> txPsd) [member function]
cls.add_method('SetTxPowerSpectralDensity',
'void',
[param('ns3::Ptr< ns3::SpectrumValue >', 'txPsd')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetNoisePowerSpectralDensity(ns3::Ptr<ns3::SpectrumValue const> noisePsd) [member function]
cls.add_method('SetNoisePowerSpectralDensity',
'void',
[param('ns3::Ptr< ns3::SpectrumValue const >', 'noisePsd')])
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::SpectrumValue const> ns3::LoRaWANPhy::GetNoisePowerSpectralDensity() [member function]
cls.add_method('GetNoisePowerSpectralDensity',
'ns3::Ptr< ns3::SpectrumValue const >',
[])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::StartRx(ns3::Ptr<ns3::SpectrumSignalParameters> params) [member function]
cls.add_method('StartRx',
'void',
[param('ns3::Ptr< ns3::SpectrumSignalParameters >', 'params')],
is_virtual=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetErrorModel(ns3::Ptr<ns3::LoRaWANErrorModel> e) [member function]
cls.add_method('SetErrorModel',
'void',
[param('ns3::Ptr< ns3::LoRaWANErrorModel >', 'e')])
## lorawan-phy.h (module 'lorawan'): ns3::Ptr<ns3::LoRaWANErrorModel> ns3::LoRaWANPhy::GetErrorModel() const [member function]
cls.add_method('GetErrorModel',
'ns3::Ptr< ns3::LoRaWANErrorModel >',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetTRXStateRequest(ns3::LoRaWANPhyEnumeration state) [member function]
cls.add_method('SetTRXStateRequest',
'void',
[param('ns3::LoRaWANPhyEnumeration', 'state')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetPdDataIndicationCallback(ns3::PdDataIndicationCallback c) [member function]
cls.add_method('SetPdDataIndicationCallback',
'void',
[param('ns3::PdDataIndicationCallback', 'c')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetPdDataDestroyedCallback(ns3::PdDataDestroyedCallback c) [member function]
cls.add_method('SetPdDataDestroyedCallback',
'void',
[param('ns3::PdDataDestroyedCallback', 'c')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetPdDataConfirmCallback(ns3::PdDataConfirmCallback c) [member function]
cls.add_method('SetPdDataConfirmCallback',
'void',
[param('ns3::PdDataConfirmCallback', 'c')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::SetSetTRXStateConfirmCallback(ns3::SetTRXStateConfirmCallback c) [member function]
cls.add_method('SetSetTRXStateConfirmCallback',
'void',
[param('ns3::SetTRXStateConfirmCallback', 'c')])
## lorawan-phy.h (module 'lorawan'): int64_t ns3::LoRaWANPhy::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::PdDataRequest(uint32_t const phyPayloadLength, ns3::Ptr<ns3::Packet> p) [member function]
cls.add_method('PdDataRequest',
'void',
[param('uint32_t const', 'phyPayloadLength'), param('ns3::Ptr< ns3::Packet >', 'p')])
## lorawan-phy.h (module 'lorawan'): uint8_t ns3::LoRaWANPhy::GetIndex() const [member function]
cls.add_method('GetIndex',
'uint8_t',
[],
is_const=True)
## lorawan-phy.h (module 'lorawan'): void ns3::LoRaWANPhy::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3LoRaWANSpectrumSignalParameters_methods(root_module, cls):
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::LoRaWANSpectrumSignalParameters() [constructor]
cls.add_constructor([])
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::LoRaWANSpectrumSignalParameters(ns3::LoRaWANSpectrumSignalParameters const & p) [copy constructor]
cls.add_constructor([param('ns3::LoRaWANSpectrumSignalParameters const &', 'p')])
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::Ptr<ns3::SpectrumSignalParameters> ns3::LoRaWANSpectrumSignalParameters::Copy() [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::SpectrumSignalParameters >',
[],
is_virtual=True)
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::channelIndex [variable]
cls.add_instance_attribute('channelIndex', 'uint8_t', is_const=False)
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::codeRate [variable]
cls.add_instance_attribute('codeRate', 'uint8_t', is_const=False)
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::dataRateIndex [variable]
cls.add_instance_attribute('dataRateIndex', 'uint8_t', is_const=False)
## lorawan-spectrum-signal-parameters.h (module 'lorawan'): ns3::LoRaWANSpectrumSignalParameters::packet [variable]
cls.add_instance_attribute('packet', 'ns3::Ptr< ns3::Packet >', is_const=False)
return
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member | |
0)
m.e43 = Constraint(expr= m.x7 - m.x81 - m.x84 - m.x87 - m.x90 == 0)
m.e44 = Constraint(expr= m.x7 - m.x82 - m.x85 - m.x88 - m.x91 == 0)
m.e45 = Constraint(expr= m.x7 - m.x83 - m.x86 - m.x89 - m.x92 == 0)
m.e46 = Constraint(expr= m.x8 - m.x93 - m.x96 - m.x99 - m.x102 == 0)
m.e47 = Constraint(expr= m.x8 - m.x94 - m.x97 - m.x100 - m.x103 == 0)
m.e48 = Constraint(expr= m.x8 - m.x95 - m.x98 - m.x101 - m.x104 == 0)
m.e49 = Constraint(expr= m.x9 - 52.5 * m.b129 <= 0)
m.e50 = Constraint(expr= m.x10 - 52.5 * m.b130 <= 0)
m.e51 = Constraint(expr= m.x11 - 52.5 * m.b131 <= 0)
m.e52 = Constraint(expr= m.x12 - 52.5 * m.b135 <= 0)
m.e53 = Constraint(expr= m.x13 - 52.5 * m.b136 <= 0)
m.e54 = Constraint(expr= m.x14 - 52.5 * m.b137 <= 0)
m.e55 = Constraint(expr= m.x15 - 52.5 * m.b141 <= 0)
m.e56 = Constraint(expr= m.x16 - 52.5 * m.b142 <= 0)
m.e57 = Constraint(expr= m.x17 - 52.5 * m.b143 <= 0)
m.e58 = Constraint(expr= m.x18 - 52.5 * m.b147 <= 0)
m.e59 = Constraint(expr= m.x19 - 52.5 * m.b148 <= 0)
m.e60 = Constraint(expr= m.x20 - 52.5 * m.b149 <= 0)
m.e61 = Constraint(expr= m.x21 - 52.5 * m.b129 <= 0)
m.e62 = Constraint(expr= m.x22 - 51.5 * m.b132 <= 0)
m.e63 = Constraint(expr= m.x23 - 51.5 * m.b133 <= 0)
m.e64 = Constraint(expr= m.x24 - 52.5 * m.b135 <= 0)
m.e65 = Constraint(expr= m.x25 - 51.5 * m.b138 <= 0)
m.e66 = Constraint(expr= m.x26 - 51.5 * m.b139 <= 0)
m.e67 = Constraint(expr= m.x27 - 52.5 * m.b141 <= 0)
m.e68 = Constraint(expr= m.x28 - 51.5 * m.b144 <= 0)
m.e69 = Constraint(expr= m.x29 - 51.5 * m.b145 <= 0)
m.e70 = Constraint(expr= m.x30 - 52.5 * m.b147 <= 0)
m.e71 = Constraint(expr= m.x31 - 51.5 * m.b150 <= 0)
m.e72 = Constraint(expr= m.x32 - 51.5 * m.b151 <= 0)
m.e73 = Constraint(expr= m.x33 - 52.5 * m.b130 <= 0)
m.e74 = Constraint(expr= m.x34 - 51.5 * m.b132 <= 0)
m.e75 = Constraint(expr= m.x35 - 53.5 * m.b134 <= 0)
m.e76 = Constraint(expr= m.x36 - 52.5 * m.b136 <= 0)
m.e77 = Constraint(expr= m.x37 - 51.5 * m.b138 <= 0)
m.e78 = Constraint(expr= m.x38 - 53.5 * m.b140 <= 0)
m.e79 = Constraint(expr= m.x39 - 52.5 * m.b142 <= 0)
m.e80 = Constraint(expr= m.x40 - 51.5 * m.b144 <= 0)
m.e81 = Constraint(expr= m.x41 - 53.5 * m.b146 <= 0)
m.e82 = Constraint(expr= m.x42 - 52.5 * m.b148 <= 0)
m.e83 = Constraint(expr= m.x43 - 51.5 * m.b150 <= 0)
m.e84 = Constraint(expr= m.x44 - 53.5 * m.b152 <= 0)
m.e85 = Constraint(expr= m.x45 - 52.5 * m.b131 <= 0)
m.e86 = Constraint(expr= m.x46 - 51.5 * m.b133 <= 0)
m.e87 = Constraint(expr= m.x47 - 53.5 * m.b134 <= 0)
m.e88 = Constraint(expr= m.x48 - 52.5 * m.b137 <= 0)
m.e89 = Constraint(expr= m.x49 - 51.5 * m.b139 <= 0)
m.e90 = Constraint(expr= m.x50 - 53.5 * m.b140 <= 0)
m.e91 = Constraint(expr= m.x51 - 52.5 * m.b143 <= 0)
m.e92 = Constraint(expr= m.x52 - 51.5 * m.b145 <= 0)
m.e93 = Constraint(expr= m.x53 - 53.5 * m.b146 <= 0)
m.e94 = Constraint(expr= m.x54 - 52.5 * m.b149 <= 0)
m.e95 = Constraint(expr= m.x55 - 51.5 * m.b151 <= 0)
m.e96 = Constraint(expr= m.x56 - 53.5 * m.b152 <= 0)
m.e97 = Constraint(expr= m.x57 - 82 * m.b129 <= 0)
m.e98 = Constraint(expr= m.x58 - 82 * m.b130 <= 0)
m.e99 = Constraint(expr= m.x59 - 82 * m.b131 <= 0)
m.e100 = Constraint(expr= m.x60 - 82 * m.b135 <= 0)
m.e101 = Constraint(expr= m.x61 - 82 * m.b136 <= 0)
m.e102 = Constraint(expr= m.x62 - 82 * m.b137 <= 0)
m.e103 = Constraint(expr= m.x63 - 82 * m.b141 <= 0)
m.e104 = Constraint(expr= m.x64 - 82 * m.b142 <= 0)
m.e105 = Constraint(expr= m.x65 - 82 * m.b143 <= 0)
m.e106 = Constraint(expr= m.x66 - 82 * m.b147 <= 0)
m.e107 = Constraint(expr= m.x67 - 82 * m.b148 <= 0)
m.e108 = Constraint(expr= m.x68 - 82 * m.b149 <= 0)
m.e109 = Constraint(expr= m.x69 - 82 * m.b129 <= 0)
m.e110 = Constraint(expr= m.x70 - 82.5 * m.b132 <= 0)
m.e111 = Constraint(expr= m.x71 - 82.5 * m.b133 <= 0)
m.e112 = Constraint(expr= m.x72 - 82 * m.b135 <= 0)
m.e113 = Constraint(expr= m.x73 - 82.5 * m.b138 <= 0)
m.e114 = Constraint(expr= m.x74 - 82.5 * m.b139 <= 0)
m.e115 = Constraint(expr= m.x75 - 82 * m.b141 <= 0)
m.e116 = Constraint(expr= m.x76 - 82.5 * m.b144 <= 0)
m.e117 = Constraint(expr= m.x77 - 82.5 * m.b145 <= 0)
m.e118 = Constraint(expr= m.x78 - 82 * m.b147 <= 0)
m.e119 = Constraint(expr= m.x79 - 82.5 * m.b150 <= 0)
m.e120 = Constraint(expr= m.x80 - 82.5 * m.b151 <= 0)
m.e121 = Constraint(expr= m.x81 - 82 * m.b130 <= 0)
m.e122 = Constraint(expr= m.x82 - 82.5 * m.b132 <= 0)
m.e123 = Constraint(expr= m.x83 - 83.5 * m.b134 <= 0)
m.e124 = Constraint(expr= m.x84 - 82 * m.b136 <= 0)
m.e125 = Constraint(expr= m.x85 - 82.5 * m.b138 <= 0)
m.e126 = Constraint(expr= m.x86 - 83.5 * m.b140 <= 0)
m.e127 = Constraint(expr= m.x87 - 82 * m.b142 <= 0)
m.e128 = Constraint(expr= m.x88 - 82.5 * m.b144 <= 0)
m.e129 = Constraint(expr= m.x89 - 83.5 * m.b146 <= 0)
m.e130 = Constraint(expr= m.x90 - 82 * m.b148 <= 0)
m.e131 = Constraint(expr= m.x91 - 82.5 * m.b150 <= 0)
m.e132 = Constraint(expr= m.x92 - 83.5 * m.b152 <= 0)
m.e133 = Constraint(expr= m.x93 - 82 * m.b131 <= 0)
m.e134 = Constraint(expr= m.x94 - 82.5 * m.b133 <= 0)
m.e135 = Constraint(expr= m.x95 - 83.5 * m.b134 <= 0)
m.e136 = Constraint(expr= m.x96 - 82 * m.b137 <= 0)
m.e137 = Constraint(expr= m.x97 - 82.5 * m.b139 <= 0)
m.e138 = Constraint(expr= m.x98 - 83.5 * m.b140 <= 0)
m.e139 = Constraint(expr= m.x99 - 82 * m.b143 <= 0)
m.e140 = Constraint(expr= m.x100 - 82.5 * m.b145 <= 0)
m.e141 = Constraint(expr= m.x101 - 83.5 * m.b146 <= 0)
m.e142 = Constraint(expr= m.x102 - 82 * m.b149 <= 0)
m.e143 = Constraint(expr= m.x103 - 82.5 * m.b151 <= 0)
m.e144 = Constraint(expr= m.x104 - 83.5 * m.b152 <= 0)
m.e145 = Constraint(expr= m.x9 - m.x21 + 6 * m.b129 <= 0)
m.e146 = Constraint(expr= m.x10 - m.x33 + 4 * m.b130 <= 0)
m.e147 = Constraint(expr= m.x11 - m.x45 + 3.5 * m.b131 <= 0)
m.e148 = Constraint(expr= m.x22 - m.x34 + 5 * m.b132 <= 0)
m.e149 = Constraint(expr= m.x23 - m.x46 + 4.5 * m.b133 <= 0)
m.e150 = Constraint(expr= m.x35 - m.x47 + 2.5 * m.b134 <= 0)
m.e151 = Constraint(expr= -m.x12 + m.x24 + 6 * m.b135 <= 0)
m.e152 = Constraint(expr= -m.x13 + m.x36 + 4 * m.b136 <= 0)
m.e153 = Constraint(expr= -m.x14 + m.x48 + 3.5 * m.b137 <= 0)
m.e154 = Constraint(expr= -m.x25 + m.x37 + 5 * m.b138 <= 0)
m.e155 = Constraint(expr= -m.x26 + m.x49 + 4.5 * m.b139 <= 0)
m.e156 = Constraint(expr= -m.x38 + m.x50 + 2.5 * m.b140 <= 0)
m.e157 = Constraint(expr= m.x63 - m.x75 + 5.5 * m.b141 <= 0)
m.e158 = Constraint(expr= m.x64 - m.x87 + 4.5 * m.b142 <= 0)
m.e159 = Constraint(expr= m.x65 - m.x99 + 4.5 * m.b143 <= 0)
m.e160 = Constraint(expr= m.x76 - m.x88 + 4 * m.b144 <= 0)
m.e161 = Constraint(expr= m.x77 - m.x100 + 4 * m.b145 <= 0)
m.e162 = Constraint(expr= m.x89 - m.x101 + 3 * m.b146 <= 0)
m.e163 = Constraint(expr= -m.x66 + m.x78 + 5.5 * m.b147 <= 0)
m.e164 = Constraint(expr= -m.x67 + m.x90 + 4.5 * m.b148 <= 0)
m.e165 = Constraint(expr= -m.x68 + m.x102 + 4.5 * m.b149 <= 0)
m.e166 = Constraint(expr= -m.x79 + m.x91 + 4 * m.b150 <= 0)
m.e167 = Constraint(expr= -m.x80 + m.x103 + 4 * m.b151 <= 0)
m.e168 = Constraint(expr= -m.x92 + m.x104 + 3 * m.b152 <= 0)
m.e169 = Constraint(expr= m.b129 + m.b135 + m.b141 + m.b147 == 1)
m.e170 = Constraint(expr= m.b130 + m.b136 + m.b142 + m.b148 == 1)
m.e171 = Constraint(expr= m.b131 + m.b137 + m.b143 + m.b149 == 1)
m.e172 = Constraint(expr= m.b132 + m.b138 + m.b144 + m.b150 == 1)
m.e173 = Constraint(expr= m.b133 + m.b139 + m.b145 + m.b151 == 1)
m.e174 = Constraint(expr= m.b134 + m.b140 + m.b146 + m.b152 == 1)
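# Added note: e169-e174 are assignment constraints -- in each of these six rows the
# four binary variables must sum to 1, i.e. exactly one of the four alternatives is
# selected per group (e.g. exactly one of m.b129, m.b135, m.b141, m.b147).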
m.e175 = Constraint(expr= m.x1 - m.x105 - m.x109 | |
= oldLDFlags
updateMakeFileForDarwin("CA/src/c_make.as", addedCFlags, addedLDFlags)
os.system("bash install.sh")
fileOptions = utils.getCommandOutput("file -b --mime-type INSTALL.py", False)
if fileOptions == "":
fileOptions = utils.getCommandOutput("file -b --mime INSTALL.py", False)
if fileOptions != "":
        # fix the `file` invocation used by MaSuRCA; the stock one is not compatible with this system
if os.path.exists("bin/expand_fastq"):
os.system("cp bin/expand_fastq bin/expand_fastq.orig")
testIn = open("bin/expand_fastq.orig", 'r')
testOut = open("bin/expand_fastq", 'w')
for line in testIn.xreadlines():
if "case $(file" in line:
testOut.write("case $(file -b --mime \"$FILE\" |awk '{print $1}'|sed s/\\;//g) in\n")
else:
testOut.write(line.strip() + "\n")
testIn.close()
testOut.close()
else:
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf ./Utilities/cpp%s%s-%s%sMaSuRCA"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# update path to CA which is always hardcoded to Linux-amd64
os.system("cp bin/masurca bin/masurca.orig")
os.system("cat bin/masurca.orig | sed s/Linux-amd64/%s-%s/g |sed s/check_exec\\(\\\"jellyfish\\\"/check_exec\\(\\\"jellyfish-2.0\\\"/g > bin/masurca"%(OSTYPE, MACHINETYPE.replace("x86_64", "amd64")))
if OSTYPE == "Darwin":
os.system("cp bin/masurca bin/masurca.orig")
os.system("cat bin/masurca.orig | awk '{if (match($0, \"save NUM_SUPER_READS\")) { print $0\"\\n\\tprint FILE \\\"export NUM_SUPER_READS=\\\\$NUM_SUPER_READS\\\\n\\\";\"; } else { print $0}}' | sed s/\\(\\'..TOTAL_READS\\'/\\(\\\\\\\\\\$ENV{\\'TOTAL_READS\\'}/g| sed s/'<..$NUM_SUPER_READS.'/\"<ENVIRON[\\\\\\\\\\\"NUM_SUPER_READS\\\\\\\\\\\"]\"/g | sed s/'>=..$NUM_SUPER_READS.'/\">=ENVIRON[\\\\\\\\\\\"NUM_SUPER_READS\\\\\\\\\\\"]\"/g > bin/masurca")
# reset env variables again
addEnvironmentVar("CFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CPPFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("CXXFLAGS", " %s "%(addedCFlags))
addEnvironmentVar("LDFLAGS", " %s "%(addedLDFlags))
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf ./MaSuRCA-2.2.0")
os.system("rm msrca.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%smira"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
mira = utils.getFromPath("mira", "MIRA", False)
if mira == "":
if "mira" in packagesToInstall:
dl = 'y'
else:
print "MIRA binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
if OSTYPE == "Darwin":
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/mira_4.0rc5_darwin13.0.0_x86_64_static.tar.bz2 -o mira.tar.bz2")
else:
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/mira_4.0rc5_linux-gnu_x86_64_static.tar.bz2 -o mira.tar.bz2")
os.system("tar xvjf mira.tar.bz2")
os.system("rm -f mira.tar.bz2")
os.system("mv `ls -d mira*` ./Utilities/cpp%s%s-%s%smira"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
if not os.path.exists("./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
idba = utils.getFromPath("idba", "IDBA-UD", False)
if idba == "":
if "idba" in packagesToInstall:
dl = 'y'
else:
print "IDBA-UD binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/loneknightpy/idba/releases/download/1.1.3/idba-1.1.3.tar.gz -o idba.tar.gz")
os.system("tar xvzf idba.tar.gz")
os.system("mv idba-1.1.3 ./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sidba"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv src/sequence/short_sequence.h src/sequence/short_sequence.orig")
os.system("cat src/sequence/short_sequence.orig |awk '{if (match($0, \"kMaxShortSequence = 128\")) print \"static const uint32_t kMaxShortSequence = 32768;\"; else print $0}' > src/sequence/short_sequence.h")
os.system("mv src/basic/kmer.h src/basic/kmer.orig")
os.system("cat src/basic/kmer.orig |awk '{if (match($0, \"kNumUint64 = 4\")) print \" static const uint32_t kNumUint64 = 16;\"; else print $0}' > src/basic/kmer.h")
os.system("./configure")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf idba.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%seautils"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
eautils = utils.getFromPath("fastq-mcf", "EA-UTILS", False)
if eautils == "":
if "eautils" in packagesToInstall:
dl = 'y'
else:
print "EA-UTILS binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/ExpressionAnalysis/ea-utils/tarball/master -o eautils.tar.gz")
os.system("curl -L ftp://ftp.gnu.org/gnu/gsl/gsl-1.16.tar.gz -o gsl.tar.gz")
os.system("tar xvzf eautils.tar.gz")
os.system("tar xvzf gsl.tar.gz")
os.system("mv ExpressionAnalysis-ea-utils* ./Utilities/cpp%s%s-%s%seautils"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv gsl-1.16 ./Utilities/cpp%s%s-%s%seautils/clipper/gsl"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%seautils/clipper/gsl"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("./configure --prefix=`pwd`/build")
os.system("make")
os.system("make install")
os.chdir("..")
os.system("mv Makefile Makefile.orig")
os.system("cat Makefile.orig |sed s/CFLAGS?=/CFLAGS+=/g |sed s/CPPFLAGS?=/CPPFLAGS+=/g > Makefile")
addEnvironmentVar("CFLAGS", "-I. -L%s/Utilities/cpp%s%s-%s%seautils/gsl/build/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
addEnvironmentVar("CPPFLAGS", "-I. -L%s/Utilities/cpp%s%s-%s%seautils/gsl/build/lib/"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("make")
os.system("cp fastq-mcf ../")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf eautils.tar.gz")
os.system("rm -rf gsl.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%sabyss"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
abyss = utils.getFromPath("ABYSS", "ABySS", False)
if abyss == "":
if "abyss" in packagesToInstall:
dl = 'y'
else:
print "ABySS binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/sparsehash/sparsehash/archive/sparsehash-2.0.2.tar.gz -o sparse.tar.gz")
os.system("tar xvzf sparse.tar.gz")
os.chdir("sparsehash-sparsehash-2.0.2")
os.system("./configure --prefix=`pwd`")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L http://sourceforge.net/projects/boost/files/boost/1.54.0/boost_1_54_0.tar.gz -o boost.tar.gz")
os.system("tar xvzf boost.tar.gz")
os.system("curl -L http://www.bcgsc.ca/platform/bioinfo/software/abyss/releases/1.3.6/abyss-1.3.6.tar.gz -o abyss.tar.gz")
os.system("tar xvzf abyss.tar.gz")
os.chdir("abyss-1.3.6")
os.system("ln -s %s/boost_1_54_0/boost boost"%(METAMOS_ROOT))
addEnvironmentVar("CFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
addEnvironmentVar("CPPFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
addEnvironmentVar("CXXFLAGS", "-I%s/sparsehash-sparsehash-2.0.2/include"%(METAMOS_ROOT))
# sparse hash library has unused variables which cause warnings with gcc 4.8 so disable -Werror
if GCC_VERSION >= 4.8:
os.system("mv configure configure.original")
os.system("cat configure.original |sed s/\-Werror//g > configure")
os.system("chmod a+rx configure")
os.system("./configure --enable-maxk=96 --prefix=`pwd`/build")
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mkdir ./Utilities/cpp%s%s-%s%sabyss"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv abyss-1.3.6/build/* ./Utilities/cpp%s%s-%s%sabyss/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# update abysss to use installed mpi
command="mpirun"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
command="openmpirun"
mpi=utils.getFromPath(command, "MPI", False)
if not os.path.exists("%s%s%s"%(mpi, os.sep, command)):
mpi = command = ""
os.chdir("./Utilities/cpp%s%s-%s%sabyss/bin/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp abyss-pe abyss-pe-orig")
if mpi != "" and os.path.exists("ABYSS-P"):
testIn = open("abyss-pe-orig", 'r')
testOut = open("abyss-pe", 'w')
for line in testIn.xreadlines():
if "which mpirun" in line:
testOut.write("mpirun?=$(shell which %s)\n"%(command))
elif "ifdef np" in line:
testOut.write(line)
testOut.write("ifneq ($(mpirun),mpirun)\n")
elif "ABYSS-P" in line:
testOut.write(line)
testOut.write("else\n")
testOut.write("\tABYSS $(abyssopt) $(ABYSS_OPTIONS) -o $@ $(in) $(se)\n")
testOut.write("endif\n")
else:
testOut.write(line)
testIn.close()
testOut.close()
else:
print "Error: cannot find MPI in your path. Disabling ABySS threading."
os.system("cat abyss-pe-orig |awk -v found=0 -v skipping=0 '{if (match($0, \"ifdef np\")) {skipping=1; } if (skipping && match($1, \"ABYSS\")) {print $0; skipping=1; found=1} if (found && match($1, \"endif\")) {skipping=0;found = 0;} else if (skipping == 0) { print $0; } }' > abyss-pe")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf sparsehash-sparsehash-2.0.2")
os.system("rm -rf sparse.tar.gz")
os.system("rm -rf abyss-1.3.6")
os.system("rm -rf abyss.tar.gz")
os.system("rm -rf boost_1_54_0")
os.system("rm -rf boost.tar.gz")
if not os.path.exists("./Utilities/cpp%s%s-%s%ssga"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
sga = utils.getFromPath("sga", "SGA", False)
if sga == "":
if "sga" in packagesToInstall:
dl = 'y'
else:
print "SGA binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L https://github.com/sparsehash/sparsehash/archive/sparsehash-2.0.2.tar.gz -o sparse.tar.gz")
os.system("tar xvzf sparse.tar.gz")
os.chdir("sparsehash-sparsehash-2.0.2")
os.system("./configure --prefix=`pwd`")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L https://github.com/pezmaster31/bamtools/archive/v2.3.0.tar.gz -o bamtools.tar.gz")
os.system("tar xvzf bamtools.tar.gz")
os.system("curl -L http://sourceforge.net/projects/bio-bwa/files/bwa-0.7.5a.tar.bz2 -o bwa.tar.bz2")
os.system("tar xvjf bwa.tar.bz2")
os.chdir("bwa-0.7.5a")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("curl -L https://github.com/jts/sga/archive/v0.10.10.tar.gz -o sga.tar.gz")
os.system("tar xvzf sga.tar.gz")
os.system("mv sga-0.10.10 ./Utilities/cpp%s%s-%s%ssga"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv bamtools-2.3.0 ./Utilities/cpp%s%s-%s%ssga/bamtools"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mv sparsehash-sparsehash-2.0.2 ./Utilities/cpp%s%s-%s%ssga/sparsehash"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%ssga/bamtools"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("mkdir build")
os.chdir("build")
os.system("export CC=`which gcc` && cmake ..")
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.chdir("./Utilities/cpp%s%s-%s%ssga/src"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
# sparse hash library has unused variables which cause warnings with gcc 4.8 so disable -Werror
if GCC_VERSION >= 4.8:
os.system("mv configure.ac configure.original")
os.system("cat configure.original |sed s/\-Werror//g > configure.ac")
os.system("sh ./autogen.sh")
os.system("./configure --with-sparsehash=`pwd`/../sparsehash --with-bamtools=`pwd`/../bamtools --prefix=`pwd`/../")
updateMakeFileForDarwin("Makefile", addedCFlags, addedLDFlags)
os.system("make install")
os.chdir("%s"%(METAMOS_ROOT))
os.system("mv bwa-0.7.5a/bwa ./Utilities/cpp%s%s-%s%ssga/bin/"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.system("cp %s/Utilities/cpp%s%s-%s%ssamtools %s/Utilities/cpp%s%s-%s%ssga/bin%ssamtools"%(METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, METAMOS_ROOT, os.sep, OSTYPE, MACHINETYPE, os.sep, os.sep))
os.system("rm -rf sparsehash-sparsehash-2.0.2")
os.system("rm -rf sparse.tar.gz")
os.system("rm -rf bamtools-2.3.0")
os.system("rm -rf bamtools.tar.gz")
os.system("rm -rf sga-0.10.10")
os.system("rm -rf sga.tar.gz")
os.system("rm -rf bwa.tar.bz2")
os.system("rm -rf bwa-0.7.5a")
if not os.path.exists("./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
edena = utils.getFromPath("edena", "EDENA", False)
if "edena" in packagesToInstall:
dl = 'y'
else:
print "Edena binaries not found, optional for Assemble step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L ftp://ftp.cbcb.umd.edu/pub/data/metamos/EdenaV3_130110.tar.gz -o edena.tar.gz")
os.system("tar xvzf edena.tar.gz")
os.system("mv EdenaV3.130110 ./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp%s%s-%s%sedena"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("src/Makefile", addedCFlags, addedLDFlags)
os.system("make")
os.chdir("%s"%(METAMOS_ROOT))
os.system("rm -rf edena.tar.gz")
if not os.path.exists("./quast"):
if "quast" in packagesToInstall:
dl = 'y'
else:
print "QUAST tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("curl -L http://downloads.sourceforge.net/project/quast/quast-2.2.tar.gz -o quast.tar.gz")
os.system("tar xvzf quast.tar.gz")
os.system("mv ./quast-2.2 ./quast")
os.system("rm -rf quast.tar.gz")
# since quast requires a reference, also download refseq
ftpSite = "ftp://ftp.ncbi.nih.gov/genomes/"
file = "all.fna.tar.gz"
if not os.path.exists("./Utilities/DB/refseq/") and not nodbs:
print "Downloading refseq genomes (Bacteria/%s, Viruses/%s)..."%(file,file)
print "\tThis file is large and may take time to download"
os.system("curl -L %s/archive/old_refseq/Bacteria/%s -o bacteria.tar.gz"%(ftpSite, file))
os.system("curl -L %s/Viruses/%s -o viruses.tar.gz"%(ftpSite, file))
os.system("mkdir -p ./Utilities/DB/refseq/temp")
os.system("mv bacteria.tar.gz ./Utilities/DB/refseq/temp")
os.system("mv viruses.tar.gz ./Utilities/DB/refseq/temp")
os.chdir("./Utilities/DB/refseq/temp")
os.system("tar xvzf bacteria.tar.gz")
os.system("tar xvzf viruses.tar.gz")
os.chdir("..")
print "Current directory is %s"%(os.getcwd())
for file in os.listdir("%s/temp"%(os.getcwd())):
file = "%s%stemp%s%s"%(os.getcwd(), os.sep, os.sep, file)
if os.path.isdir(file):
prefix = os.path.splitext(os.path.basename(file))[0]
os.system("cat %s/*.fna > %s.fna"%(file, prefix))
os.system("rm -rf temp")
os.chdir("%s"%(METAMOS_ROOT))
if not os.path.exists("./Utilities/cpp%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep)):
if "freebayes" in packagesToInstall:
dl = 'y'
else:
print "FreeBayes tool not found, optional for Validate step, download now?"
dl = raw_input("Enter Y/N: ")
if dl == 'y' or dl == 'Y':
os.system("git clone --recursive git://github.com/ekg/freebayes.git freebayes")
os.system("mv ./freebayes ./Utilities/cpp/%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
os.chdir("./Utilities/cpp/%s%s-%s%sfreebayes"%(os.sep, OSTYPE, MACHINETYPE, os.sep))
updateMakeFileForDarwin("src/makefile", addedCFlags, addedLDFlags)
# dont set static building libs on | |
import os
import sys
import locale
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import SafeConfigParser as ConfigParser
try:
from importlib import reload
except ImportError:
pass
import logging
from resources.extensions import *
class SMAConfigParser(ConfigParser, object):
def getlist(self, section, option, vars=None, separator=",", default=[], lower=True, replace=[' ']):
value = self.get(section, option, vars=vars)
if not isinstance(value, str) and isinstance(value, list):
return value
if value == '':
return list(default)
value = value.split(separator)
for r in replace:
value = [x.replace(r, '') for x in value]
if lower:
value = [x.lower() for x in value]
value = [x.strip() for x in value]
return value
def getdict(self, section, option, vars=None, listseparator=",", dictseparator=":", default={}, lower=True, replace=[' '], valueModifier=None):
l = self.getlist(section, option, vars, listseparator, [], lower, replace)
output = dict(default)
for listitem in l:
split = listitem.split(dictseparator, 1)
if len(split) > 1:
if valueModifier:
try:
split[1] = valueModifier(split[1])
except:
self.log.exception("Invalid value for getdict")
continue
output[split[0]] = split[1]
return output
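    # Illustrative example (added comment; the raw value is made up): for an option
    # whose raw value is 'aac:2, ac3:6',
    #   getdict(section, option, valueModifier=int)
    # returns {'aac': 2, 'ac3': 6} -- the value is split on ',' into items, each item
    # is split once on ':' and the right-hand side is passed through valueModifier.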
def getpath(self, section, option, vars=None):
path = self.get(section, option, vars=vars).strip()
if path == '':
return None
return os.path.normpath(path)
def getdirectory(self, section, option, vars=None):
directory = self.getpath(section, option, vars)
try:
os.makedirs(directory)
except:
pass
return directory
def getdirectories(self, section, option, vars=None, separator=",", default=[]):
directories = self.getlist(section, option, vars=vars, separator=separator, default=default, lower=False)
directories = [os.path.normpath(x) for x in directories]
for d in directories:
if not os.path.isdir(d):
try:
os.makedirs(d)
except:
pass
return directories
def getextension(self, section, option, vars=None):
extension = self.get(section, option, vars=vars).lower().replace(' ', '').replace('.', '')
if extension == '':
return None
return extension
def getextensions(self, section, option, separator=",", vars=None):
return self.getlist(section, option, vars, separator, replace=[' ', '.'])
def getint(self, section, option, vars=None):
if sys.version[0] == '2':
return int(super(SMAConfigParser, self).get(section, option, vars=vars))
return super(SMAConfigParser, self).getint(section, option, vars=vars)
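# Illustrative usage sketch (added comment; the file name and values are made up):
# SMAConfigParser is driven like the standard-library ConfigParser, with the typed
# getters defined above:
#
#   cfg = SMAConfigParser()
#   cfg.read('settings.ini')
#   ffmpeg = cfg.getpath('Converter', 'ffmpeg')
#   exts = cfg.getextensions('Converter', 'ignored-extensions')   # e.g. ['nfo', 'ds_store']
#   threads = cfg.getint('Converter', 'threads')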
class ReadSettings:
defaults = {
'Converter': {
'ffmpeg': 'ffmpeg' if os.name != 'nt' else 'ffmpeg.exe',
'ffprobe': 'ffprobe' if os.name != 'nt' else 'ffprobe.exe',
'threads': 0,
'hwaccels': '',
'hwaccel-decoders': 'h264_cuvid, mjpeg_cuvid, mpeg1_cuvid, mpeg2_cuvid, mpeg4_cuvid, vc1_cuvid, hevc_qsv, h264_qsv, hevc_vaapi, h264_vaapi',
'hwdevices': 'vaapi:/dev/dri/renderD128',
'hwaccel-output-format': 'vaapi:vaapi',
'output-directory': '',
'output-format': 'mp4',
'output-extension': 'mp4',
'temp-extension': '',
'minimum-size': '0',
'ignored-extensions': 'nfo, ds_store',
'copy-to': '',
'move-to': '',
'delete-original': True,
'sort-streams': True,
'process-same-extensions': False,
'bypass-if-copying-all': False,
'force-convert': False,
'post-process': False,
'wait-post-process': False,
'detailed-progress': False,
'opts-separator': ',',
'preopts': '',
'postopts': '',
'regex-directory-replace': r'[^\w\-_\. ]',
},
'Permissions': {
'chmod': '0644',
'uid': -1,
'gid': -1,
},
'Metadata': {
'relocate-moov': True,
'full-path-guess': True,
'tag': True,
'tag-language': 'eng',
'download-artwork': 'poster',
'sanitize-disposition': '',
'strip-metadata': False,
'keep-titles': False,
},
'Video': {
'codec': 'h264, x264',
'max-bitrate': 0,
'bitrate-ratio': '',
'crf': -1,
'crf-profiles': '',
'preset': '',
'codec-parameters': '',
'dynamic-parameters': False,
'max-width': 0,
'profile': '',
'max-level': 0.0,
'pix-fmt': '',
'filter': '',
'force-filter': False,
},
'HDR': {
'codec': '',
'pix-fmt': '',
'space': 'bt2020nc',
'transfer': 'smpte2084',
'primaries': 'bt2020',
'preset': '',
'codec-parameters': '',
'filter': '',
'force-filter': False,
'profile': '',
},
'Audio': {
'codec': 'ac3',
'languages': '',
'default-language': '',
'first-stream-of-language': False,
'allow-language-relax': True,
'channel-bitrate': 128,
'max-bitrate': 0,
'max-channels': 0,
'prefer-more-channels': True,
'default-more-channels': True,
'filter': '',
'force-filter': False,
'sample-rates': '',
'sample-format': '',
'copy-original': False,
'copy-original-before': False,
'aac-adtstoasc': False,
'ignore-truehd': 'mp4, m4v',
'ignored-dispositions': '',
'unique-dispositions': False,
'stream-codec-combinations': '',
},
'Universal Audio': {
'codec': 'aac',
'channel-bitrate': 128,
'first-stream-only': False,
'move-after': False,
'filter': '',
'force-filter': False,
},
'Audio.ChannelFilters': {
'6-2': 'pan=stereo|FL=0.5*FC+0.707*FL+0.707*BL+0.5*LFE|FR=0.5*FC+0.707*FR+0.707*BR+0.5*LFE',
},
'Subtitle': {
'codec': 'mov_text',
'codec-image-based': '',
'languages': '',
'default-language': '',
'first-stream-of-language': False,
'encoding': '',
'burn-subtitles': False,
'burn-dispositions': '',
'embed-subs': True,
'embed-image-subs': False,
'embed-only-internal-subs': False,
'filename-dispositions': 'forced',
'ignore-embedded-subs': False,
'ignored-dispositions': '',
'unique-dispositions': False,
'attachment-codec': '',
},
'Subtitle.Subliminal': {
'download-subs': False,
'download-hearing-impaired-subs': False,
'providers': '',
},
'Subtitle.Subliminal.Auth': {
'opensubtitles': '',
'tvsubtitles': '',
},
'Sonarr': {
'host': 'localhost',
'port': 8989,
'apikey': '',
'ssl': False,
'webroot': '',
'force-rename': False,
'rescan': True,
'block-reprocess': False,
},
'Radarr': {
'host': 'localhost',
'port': 7878,
'apikey': '',
'ssl': False,
'webroot': '',
'force-rename': False,
'rescan': True,
'block-reprocess': False,
},
'Sickbeard': {
'host': 'localhost',
'port': 8081,
'ssl': False,
'apikey': '',
'webroot': '',
'username': '',
'password': '',
},
'Sickrage': {
'host': 'localhost',
'port': 8081,
'ssl': False,
'apikey': '',
'webroot': '',
'username': '',
'password': '',
},
'SABNZBD': {
'convert': True,
'sickbeard-category': 'sickbeard',
'sickrage-category': 'sickrage',
'sonarr-category': 'sonarr',
'radarr-category': 'radarr',
'bypass-category': 'bypass',
'output-directory': '',
'path-mapping': '',
},
'Deluge': {
'sickbeard-label': 'sickbeard',
'sickrage-label': 'sickrage',
'sonarr-label': 'sonarr',
'radarr-label': 'radarr',
'bypass-label': 'bypass',
'convert': True,
'host': 'localhost',
'port': 58846,
'username': '',
'password': '',
'output-directory': '',
'remove': False,
'path-mapping': '',
},
'qBittorrent': {
'sickbeard-label': 'sickbeard',
'sickrage-label': 'sickrage',
'sonarr-label': 'sonarr',
'radarr-label': 'radarr',
'bypass-label': 'bypass',
'convert': True,
'action-before': '',
'action-after': '',
'host': 'localhost',
'port': 8080,
'ssl': False,
'username': '',
'password': '',
'output-directory': '',
'path-mapping': '',
},
'uTorrent': {
'sickbeard-label': 'sickbeard',
'sickrage-label': 'sickrage',
'sonarr-label': 'sonarr',
'radarr-label': 'radarr',
'bypass-label': 'bypass',
'convert': True,
'webui': False,
'action-before': '',
'action-after': '',
'host': 'localhost',
'ssl': False,
'port': 8080,
'username': '',
'password': '',
'output-directory': '',
'path-mapping': '',
},
'Plex': {
'host': 'localhost',
'port': 32400,
'refresh': False,
'token': '',
},
}
migration = {
'MP4': {
'ffmpeg': "Converter.ffmpeg",
'ffprobe': "Converter.ffprobe",
'threads': 'Converter.threads',
'output_directory': 'Converter.output-directory',
'copy_to': 'Converter.copy-to',
'move_to': 'Converter.move-to',
'output_extension': 'Converter.output-extension',
'temp_extension': 'Converter.temp-extension',
'output_format': 'Converter.output-format',
'delete_original': 'Converter.delete-original',
'relocate_moov': 'Metadata.relocate-moov',
'ios-audio': 'Universal Audio.codec',
'ios-first-track-only': 'Universal Audio.first-stream-only',
'ios-move-last': 'Universal Audio.move-after',
'ios-audio-filter': 'Universal Audio.filter',
'max-audio-channels': 'Audio.max-channels',
'audio-language': 'Audio.languages',
'audio-default-language': 'Audio.default-language',
'audio-codec': 'Audio.codec',
'ignore-truehd': 'Audio.ignore-truehd',
'audio-filter': 'Audio.filter',
'audio-sample-rates': 'Audio.sample-rates',
'audio-channel-bitrate': 'Audio.channel-bitrate',
'audio-copy-original': 'Audio.copy-original',
'audio-first-track-of-language': 'Audio.first-stream-of-language',
'allow-audio-language-relax': 'Audio.allow-language-relax',
'sort-streams': 'Converter.sort-streams',
'prefer-more-channels': 'Audio.prefer-more-channels',
'video-codec': 'Video.codec',
'video-bitrate': 'Video.max-bitrate',
'video-crf': 'Video.crf',
'video-crf-profiles': 'Video.crf-profiles',
'video-max-width': 'Video.max-width',
'video-profile': 'Video.profile',
'h264-max-level': 'Video.max-level',
'aac_adtstoasc': 'Audio.aac-adtstoasc',
'hwaccels': 'Converter.hwaccels',
'hwaccel-decoders': 'Converter.hwaccel-decoders',
'subtitle-codec': 'Subtitle.codec',
'subtitle-codec-image-based': 'Subtitle.codec-image-based',
'subtitle-language': 'Subtitle.languages',
'subtitle-default-language': 'Subtitle.default-language',
'subtitle-encoding': 'Subtitle.encoding',
'burn-subtitles': 'Subtitle.burn-subtitles',
'attachment-codec': 'Subtitle.attachment-codec',
'process-same-extensions': 'Converter.process-same-extensions',
'force-convert': 'Converter.force-convert',
'fullpathguess': 'Metadata.full-path-guess',
'tagfile': 'Metadata.tag',
'tag-language': 'Metadata.tag-language',
'download-artwork': 'Metadata.download-artwork',
'download-subs': 'Subtitle.download-subs',
'download-hearing-impaired-subs': 'Subtitle.download-hearing-impaired-subs',
'embed-subs': 'Subtitle.embed-subs',
'embed-image-subs': 'Subtitle.embed-image-subs',
'embed-only-internal-subs': 'Subtitle.embed-only-internal-subs',
'sub-providers': 'Subtitle.download-providers',
'post-process': 'Converter.post-process',
'ignored-extensions': 'Converter.ignored-extensions',
'pix-fmt': 'Video.pix-fmt',
'preopts': 'Converter.preopts',
'postopts': 'Converter.postopts',
},
'SickBeard': {
'host': 'Sickbeard.host',
'port': 'Sickbeard.port',
'ssl': "Sickbeard.ssl",
'api_key': 'Sickbeard.apikey',
'web_root': 'Sickbeard.webroot',
'username': 'Sickbeard.username',
            'password': 'Sickbeard.password'
},
'Sonarr': {
'host': 'Sonarr.host',
'port': 'Sonarr.port',
'apikey': 'Sonarr.apikey',
'ssl': 'Sonarr.ssl',
'web_root': 'Sonarr.webroot',
},
"Radarr": {
'host': 'Radarr.host',
'port': 'Radarr.port',
'apikey': 'Radarr.apikey',
'ssl': 'Radarr.ssl',
'web_root': 'Radarr.webroot',
},
'uTorrent': {
'sickbeard-label': 'uTorrent.sickbeard-label',
'sickrage-label': 'uTorrent.sickrage-label',
'sonarr-label': 'uTorrent.sonarr-label',
'radarr-label': 'uTorrent.radarr-label',
'bypass-label': 'uTorrent.bypass-label',
'convert': 'uTorrent.convert',
'webui': 'uTorrent.webui',
'action_before': 'uTorrent.action-before',
'action_after': 'uTorrent.action-after',
'host': 'uTorrent.host',
'username': 'uTorrent.username',
            'password': 'uTorrent.password',
'output_directory': 'uTorrent.output-directory',
},
"SABNZBD": {
'convert': 'SABNZBD.convert',
'sickbeard-category': 'SABNZBD.sickbeard-category',
'sickrage-category': 'SABNZBD.sickrage-category',
'sonarr-category': 'SABNZBD.sonarr-category',
'radarr-category': 'SABNZBD.radarr-category',
'bypass-category': 'SABNZBD.bypass-category',
'output_directory': 'SABNZBD.output-directory',
},
"Sickrage": {
'host': 'Sickrage.host',
'port': 'Sickrage.port',
'ssl': "Sickrage.ssl",
'api_key': 'Sickrage.apikey',
'web_root': 'Sickrage.webroot',
'username': 'Sickrage.username',
            'password': 'Sickrage.password',
},
"Deluge": {
'sickbeard-label': 'Deluge.sickbeard-label',
'sickrage-label': 'Deluge.sickrage-label',
'sonarr-label': 'Deluge.sonarr-label',
'radarr-label': 'Deluge.radarr-label',
'bypass-label': 'Deluge.bypass-label',
'convert': 'Deluge.convert',
'host': 'Deluge.host',
'port': 'Deluge.port',
'username': 'Deluge.username',
            'password': 'Deluge.password',
'output_directory': 'Deluge.output-directory',
'remove': 'Deluge.remove',
},
"qBittorrent": {
'sickbeard-label': 'qBittorrent.sickbeard-label',
'sickrage-label': 'qBittorrent.sickrage-label',
'sonarr-label': 'qBittorrent.sonarr-label',
'radarr-label': 'qBittorrent.radarr-label',
'bypass-label': 'qBittorrent.bypass-label',
'convert': 'qBittorrent.convert',
'action_before': 'qBittorrent.action-before',
'action_after': 'qBittorrent.action-after',
'host': 'qBittorrent.host',
'username': 'qBittorrent.username',
            'password': 'qBittorrent.password',
'output_directory': 'qBittorrent.output-directory',
},
"Plex": {
'host': 'Plex.host',
'port': 'Plex.port',
'refresh': 'Plex.refresh',
'token': 'Plex.token'
},
"Permissions": {
'chmod': 'Permissions.chmod',
'uid': 'Permissions.uid',
'gid': 'Permissions.gid'
}
}
migration2 = {
"Subtitle.Subliminal": {
"download-subs": "Subtitle",
"download-hearing-impaired-subs": "Subtitle",
"providers": "Subtitle.download-providers",
}
}
CONFIG_DEFAULT = "autoProcess.ini"
CONFIG_DIRECTORY = "./config"
RESOURCE_DIRECTORY = "./resources"
RELATIVE_TO_ROOT = "../"
ENV_CONFIG_VAR = "SMA_CONFIG"
@property
def CONFIG_RELATIVEPATH(self):
return os.path.join(self.CONFIG_DIRECTORY, self.CONFIG_DEFAULT)
def __init__(self, configFile=None, logger=None):
self.log = logger or logging.getLogger(__name__)
self.log.info(sys.executable)
if sys.version_info.major == 2:
self.log.warning("Python 2 is no longer officially supported. Use with caution.")
rootpath = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), self.RELATIVE_TO_ROOT))
defaultConfigFile = os.path.normpath(os.path.join(rootpath, self.CONFIG_RELATIVEPATH))
oldConfigFile = os.path.normpath(os.path.join(rootpath, self.CONFIG_DEFAULT))
envConfigFile = os.environ.get(self.ENV_CONFIG_VAR)
if envConfigFile and os.path.exists(os.path.realpath(envConfigFile)):
configFile = os.path.realpath(envConfigFile)
self.log.debug("%s environment variable override found." % (self.ENV_CONFIG_VAR))
elif not configFile:
if not os.path.exists(defaultConfigFile) and os.path.exists(oldConfigFile):
try:
os.rename(oldConfigFile, defaultConfigFile)
self.log.info("Moved configuration file to new default location %s." % defaultConfigFile)
configFile = defaultConfigFile
except:
configFile = oldConfigFile
self.log.debug("Unable to move configuration file to new location, using old location.")
else:
configFile = defaultConfigFile
self.log.debug("Loading default config file.")
if os.path.isdir(configFile):
new = os.path.realpath(os.path.join(configFile, self.CONFIG_RELATIVEPATH))
old = os.path.realpath(os.path.join(configFile, self.CONFIG_DEFAULT))
if not os.path.exists(new) and os.path.exists(old):
configFile = old
else:
configFile = new
self.log.debug("Configuration file specified is a directory, joining with %s." % (self.CONFIG_DEFAULT))
self.log.info("Loading config file %s." % configFile)
# Setup encoding to avoid UTF-8 errors
if sys.version[0] == '2':
SYS_ENCODING = None
try:
locale.setlocale(locale.LC_ALL, "")
SYS_ENCODING = locale.getpreferredencoding()
except (locale.Error, IOError):
pass
# For OSes that are poorly configured just force UTF-8
            if not SYS_ENCODING or SYS_ENCODING in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
                SYS_ENCODING = 'UTF-8'
from keras.layers import Input
from keras.models import Model
from keras.layers import Dense, Dropout, Reshape, Permute
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import ELU
from keras.layers.recurrent import GRU
from keras import backend as K
from math import floor
import librosa
import matplotlib.pyplot as plt
import numpy as np
import time
import tensorflow as ts
from pydub import AudioSegment
from flask import Flask
#AUDIO PROCESSOR:
def change_3gp_to_mp3(fileName):
output_Path = fileName + ".mp3"
AudioSegment.from_file(fileName).export( output_Path, format="mp3")
return output_Path
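# Hedged usage sketch (illustrative; 'recording.3gp' is a placeholder path).
# A 3gp recording is converted first, then fed to the mel-spectrogram helpers
# defined below.
#
#   mp3_path = change_3gp_to_mp3('recording.3gp')   # -> 'recording.3gp.mp3'
#   melgram = compute_melgram(mp3_path)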
def compute_melgram(audio_path):
''' Compute a mel-spectrogram and returns it in a shape of (1,1,96,1366), where
96 == #mel-bins and 1366 == #time frame
parameters
----------
audio_path: path for the audio file.
Any format supported by audioread will work.
'''
# mel-spectrogram parameters
SR = 12000
N_FFT = 512
N_MELS = 96
HOP_LEN = 256
DURA = 29.12 # to make it 1366 frame..
src, sr = librosa.load(audio_path, sr=SR) # whole signal
n_sample = src.shape[0]
n_sample_fit = int(DURA*SR)
if n_sample < n_sample_fit: # if too short
src = np.hstack((src, np.zeros((int(DURA*SR) - n_sample,))))
elif n_sample > n_sample_fit: # if too long
        src = src[(n_sample - n_sample_fit) // 2:(n_sample + n_sample_fit) // 2]
logam = librosa.logamplitude
melgram = librosa.feature.melspectrogram
ret = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
n_fft=N_FFT, n_mels=N_MELS)**2,
ref_power=1.0)
ret = ret[np.newaxis, np.newaxis, :]
return ret
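# Hedged usage sketch (illustrative; 'song.mp3' is a placeholder path).
# The result is shaped (1, 1, 96, 1366): one sample, one channel, 96 mel bins
# and 1366 time frames, i.e. 29.12 s of audio at SR=12000 with HOP_LEN=256.
#
#   mel = compute_melgram('song.mp3')
#   print(mel.shape)    # (1, 1, 96, 1366)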
def compute_melgram_multiframe(audio_path, all_song=True):
''' Compute a mel-spectrogram in multiple frames of the song and returns it in a shape of (N,1,96,1366), where
96 == #mel-bins, 1366 == #time frame, and N=#frames
parameters
----------
audio_path: path for the audio file.
Any format supported by audioread will work.
'''
# mel-spectrogram parameters
SR = 12000
N_FFT = 512
N_MELS = 96
HOP_LEN = 256
DURA = 29.12 # to make it 1366 frame..
if all_song:
DURA_TRASH = 0
else:
DURA_TRASH = 20
src, sr = librosa.load(audio_path, sr=SR) # whole signal
n_sample = src.shape[0]
n_sample_fit = int(DURA*SR)
n_sample_trash = int(DURA_TRASH*SR)
# remove the trash at the beginning and at the end
src = src[n_sample_trash:(n_sample-n_sample_trash)]
n_sample=n_sample-2*n_sample_trash
# print n_sample
# print n_sample_fit
ret = np.zeros((0, 1, 96, 1366), dtype=np.float32)
if n_sample < n_sample_fit: # if too short
src = np.hstack((src, np.zeros((int(DURA*SR) - n_sample,))))
logam = librosa.logamplitude
melgram = librosa.feature.melspectrogram
ret = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
n_fft=N_FFT, n_mels=N_MELS)**2,
ref_power=1.0)
ret = ret[np.newaxis, np.newaxis, :]
elif n_sample > n_sample_fit: # if too long
N = int(floor(n_sample/n_sample_fit))
src_total=src
for i in range(0, N):
src = src_total[(i*n_sample_fit):(i+1)*(n_sample_fit)]
logam = librosa.logamplitude
melgram = librosa.feature.melspectrogram
retI = logam(melgram(y=src, sr=SR, hop_length=HOP_LEN,
n_fft=N_FFT, n_mels=N_MELS)**2,
ref_power=1.0)
retI = retI[np.newaxis, np.newaxis, :]
# print retI.shape
ret = np.concatenate((ret, retI), axis=0)
return ret
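# Hedged usage sketch (illustrative; 'long_song.mp3' is a placeholder path).
# For songs longer than 29.12 s the multiframe variant returns one window per
# row, so the leading dimension N depends on the song length.
#
#   mels = compute_melgram_multiframe('long_song.mp3', all_song=True)
#   print(mels.shape)   # (N, 1, 96, 1366)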
#Functions:
K.set_image_dim_ordering('th')
def pop_layer(model):
if not model.outputs:
raise Exception('Sequential model cannot be popped: model is empty.')
model.layers.pop()
if not model.layers:
model.outputs = []
model.inbound_nodes = []
model.outbound_nodes = []
else:
model.layers[-1].outbound_nodes = []
model.outputs = [model.layers[-1].output]
model.built = False
def MusicTaggerCRNN(weights='msd', input_tensor=None):
'''Instantiate the MusicTaggerCRNN architecture,
optionally loading weights pre-trained
    on the Million Song Dataset. Note that this implementation
    requires `image_dim_ordering="th"` in your Keras config
    at ~/.keras/keras.json (a RuntimeError is raised under "tf", see below).
For preparing mel-spectrogram input, see
`audio_conv_utils.py` in [applications](https://github.com/fchollet/keras/tree/master/keras/applications).
You will need to install [Librosa](http://librosa.github.io/librosa/) to use it.
# Arguments
weights: one of `None` (random initialization)
or "msd" (pre-training on ImageNet).
        input_tensor: optional input shape tuple, e.g. (1, 96, 1366); despite
            the name, this implementation passes it to `Input(shape=...)`, so a
            shape tuple rather than a Keras tensor is expected.
# Returns
A Keras model instance.
'''
if weights not in {'msd', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `msd` '
'(pre-training on Million Song Dataset).')
# Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (1, 96, 1366)   # channels first: (channel, freq, time)
    else:
        input_shape = (96, 1366, 1)   # channels last: (freq, time, channel)
if input_tensor is None:
melgram_input = Input(shape=input_shape)
else:
melgram_input = Input(shape=input_tensor)
# Determine input axis
if K.image_dim_ordering() == 'th':
channel_axis = 1
freq_axis = 2
time_axis = 3
else:
channel_axis = 3
freq_axis = 1
time_axis = 2
# Input block
x = ZeroPadding2D(padding=(0, 37))(melgram_input)
x = BatchNormalization(axis=time_axis, name='bn_0_freq')(x)
# Conv block 1
x = Convolution2D(64, 3, 3, border_mode='same', name='conv1', trainable=False)(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn1', trainable=False)(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1', trainable=False)(x)
x = Dropout(0.1, name='dropout1', trainable=False)(x)
# Conv block 2
x = Convolution2D(128, 3, 3, border_mode='same', name='conv2', trainable=False)(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn2', trainable=False)(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(3, 3), strides=(3, 3), name='pool2', trainable=False)(x)
x = Dropout(0.1, name='dropout2', trainable=False)(x)
# Conv block 3
x = Convolution2D(128, 3, 3, border_mode='same', name='conv3', trainable=False)(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn3', trainable=False)(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool3', trainable=False)(x)
x = Dropout(0.1, name='dropout3', trainable=False)(x)
# Conv block 4
x = Convolution2D(128, 3, 3, border_mode='same', name='conv4', trainable=False)(x)
x = BatchNormalization(axis=channel_axis, mode=0, name='bn4', trainable=False)(x)
x = ELU()(x)
x = MaxPooling2D(pool_size=(4, 4), strides=(4, 4), name='pool4', trainable=False)(x)
x = Dropout(0.1, name='dropout4', trainable=False)(x)
# reshaping
if K.image_dim_ordering() == 'th':
x = Permute((3, 1, 2))(x)
x = Reshape((15, 128))(x)
# GRU block 1, 2, output
x = GRU(32, return_sequences=True, name='gru1')(x)
x = GRU(32, return_sequences=False, name='gru2')(x)
x = Dropout(0.3, name='final_drop')(x)
if weights is None:
# Create model
x = Dense(10, activation='sigmoid', name='output')(x)
model = Model(melgram_input, x)
return model
else:
# Load input
x = Dense(50, activation='sigmoid', name='output')(x)
if K.image_dim_ordering() == 'tf':
raise RuntimeError("Please set image_dim_ordering == 'th'."
"You can set it at ~/.keras/keras.json")
# Create model
initial_model = Model(melgram_input, x)
initial_model.load_weights('weights/music_tagger_crnn_weights_%s.h5' % K._BACKEND,
by_name=True)
# Eliminate last layer
pop_layer(initial_model)
# Add new Dense layer
last = initial_model.get_layer('final_drop')
preds = (Dense(10, activation='sigmoid', name='preds'))(last.output)
model = Model(initial_model.input, preds)
return model
# UTILS
def sort_result(tags, preds):
result = zip(tags, preds)
sorted_result = sorted(result, key=lambda x: x[1], reverse=True)
save_result_file(sorted_result)
for name, score in sorted_result:
score = np.array(score)
score *= 100
print(name, ':', '%5.3f ' % score, ' ',)
print
return sorted_result
def save_result_file(sorted_result):
    # write "name:score;" pairs (scores as percentages) to result.txt
    with open('result.txt', 'w') as f:
        for name, score in sorted_result:
            score = np.array(score)
            score *= 100
            f.write(name + ':' + '%5.3f' % score + ';')
def predict_label(preds):
labels=preds.argsort()[::-1]
return labels[0]
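# Hedged usage sketch (illustrative; the probabilities are made-up numbers and
# `tags` refers to the GTZAN tag list defined further below).
#
#   import numpy as np
#   preds = np.array([0.10, 0.70, 0.05, 0.05, 0.02, 0.03, 0.01, 0.01, 0.02, 0.01])
#   sort_result(tags, preds.tolist())   # prints the genres in descending order
#   top = predict_label(preds)          # index of the highest score (here: 1)
#   print(tags[top])                    # -> 'classical'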
# Melgram computation
def extract_melgrams(list_path, MULTIFRAMES, process_all_song, num_songs_genre):
melgrams = np.zeros((0, 1, 96, 1366), dtype=np.float32)
song_paths = open(list_path, 'r').read().splitlines()
labels = list()
num_frames_total = list()
for song_ind, song_path in enumerate(song_paths):
print(song_path)
song_path = change_3gp_to_mp3(song_path)
if MULTIFRAMES:
melgram = compute_melgram_multiframe(song_path, process_all_song)
num_frames = melgram.shape[0]
num_frames_total.append(num_frames)
print ('num frames:', num_frames)
if num_songs_genre != '':
index = int(floor(song_ind/num_songs_genre))
for i in range(0, num_frames):
labels.append(index)
else:
pass
else:
melgram = compute_melgram(song_path)
melgrams = np.concatenate((melgrams, melgram), axis=0)
if num_songs_genre != '':
return melgrams, labels, num_frames_total
else:
return melgrams, num_frames_total
# Parameters to set
TEST = 1
LOAD_MODEL = 0
LOAD_WEIGHTS = 1
MULTIFRAMES = 1
time_elapsed = 0
# GTZAN Dataset Tags
tags = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock']
tags = np.array(tags)
# Paths to set
model_name = "example_model"
model_path = "models_trained/" + model_name + "/"
weights_path = "models_trained/" + model_name + "/weights/"
test_songs_list = 'list_example.txt'
# Errors here:
def init_model():
# Initialize model
global model, graph
model = MusicTaggerCRNN(weights=None, input_tensor=(1, 96, 1366))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
graph = ts.get_default_graph()
if LOAD_WEIGHTS:
model.load_weights(weights_path + 'crnn_net_gru_adam_ours_epoch_40.h5')
return model
def main_body():
X_test, num_frames_test = extract_melgrams(test_songs_list, MULTIFRAMES, process_all_song=False, num_songs_genre='')
num_frames_test = np.array(num_frames_test)
t0 = time.time()
print('\n--------- Predicting ---------', '\n')
results = np.zeros((X_test.shape[0], tags.shape[0]))
predicted_labels_mean = np.zeros((num_frames_test.shape[0], 1))
predicted_labels_frames = np.zeros((X_test.shape[0], 1))
song_paths = open(test_songs_list, 'r').read().splitlines()
previous_numFrames = 0
n = 0
for i in range(0, num_frames_test.shape[0]):
print('Song number' + str(i) + ': ' + song_paths[i])
num_frames = num_frames_test[i]
print('Num_frames of 30s: ', str(num_frames), '\n')
with graph.as_default():
results[previous_numFrames:previous_numFrames + num_frames] = model.predict(
X_test[previous_numFrames:previous_numFrames + num_frames, :, :, :])
s_counter = 0
for j in range(previous_numFrames, previous_numFrames + num_frames):
# normalize the results
total = results[j, :].sum()
results[j, :] = results[j, :] / total
print('Percentage of genre prediction for seconds ' + str(20 + s_counter * 30) + ' to ' \
+ str(20 + (s_counter + 1) * 30) + ': ')
sort_result(tags, results[j, :].tolist())
predicted_label_frames = predict_label(results[j, :])
predicted_labels_frames[n] = predicted_label_frames
s_counter += 1
n += 1
print('\n', 'Mean genre of the song: ')
results_song = results[previous_numFrames:previous_numFrames + num_frames]
mean = results_song.mean(0)
sorted_result = sort_result(tags, mean.tolist())
predicted_label_mean = predict_label(mean)
predicted_labels_mean[i] = predicted_label_mean
print('\n', 'The predicted music genre for the song is', str(tags[predicted_label_mean]), '!\n')
previous_numFrames = previous_numFrames + num_frames
print('************************************************************************************************')
return sorted_result
def change_to_json(sorted_result):
ziped = []
    for name, score in sorted_result:
        ziped.append({'name': name, 'score': float(score)})
    return ziped
import csv
import matplotlib.pyplot as plt
import mysql_handler as mh
from prettytable import PrettyTable
import click
import datetime
import os
from pathlib import Path
from changes_github import func_timer
def save_csv(file_path, headers, rows):
with open(file_path, 'w', encoding='utf8', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(headers)
for row in rows:
f_csv.writerow(row)
print('save data to csv done!')
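# Hedged usage sketch (illustrative; the headers, row and file name are made up).
#
#   headers = ['Test Case', 'Size', 'Path']
#   rows = [('test_login', 42, 'tests/test_login.py')]
#   save_csv('example_output.csv', headers, rows)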
def combine_csv_path(folder, file_name):
now = datetime.datetime.now()
file_name = file_name + '_' + datetime.datetime.strftime(now, '%Y-%m-%d_%H-%M-%S') + '.csv'
last_folder = Path(os.path.abspath(os.path.join(os.getcwd(), "..")))
file_dir = last_folder / 'output' / 'csv' / folder
if not os.path.exists(file_dir):
os.mkdir(file_dir)
file_path = file_dir / file_name
return file_path
def combine_image_path(folder, image_name):
now = datetime.datetime.now()
file_name = image_name + '_' + datetime.datetime.strftime(now, '%Y-%m-%d_%H-%M-%S') + '.jpg'
last_folder = Path(os.path.abspath(os.path.join(os.getcwd(), "..")))
file_dir = last_folder / 'output' / 'image' / folder
if not os.path.exists(file_dir):
os.mkdir(file_dir)
file_path = file_dir / file_name
return file_path
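# Hedged usage sketch (illustrative): both helpers build a timestamped path
# under ../output/csv/<folder>/ or ../output/image/<folder>/ and create the
# folder if it does not exist yet.
#
#   csv_path = combine_csv_path('size', 'size_bigger_than_30')
#   # e.g. .../output/csv/size/size_bigger_than_30_2023-01-01_12-00-00.csv
#   img_path = combine_image_path('size', 'Test_Case_Size_Distribution')
#   # e.g. .../output/image/size/Test_Case_Size_Distribution_2023-01-01_12-00-00.jpg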
def show_size():
size = [[1, 9], [10, 19], [20, 29], [30, 39], [40, 49], [50, 59], [60, 69]]
x_label = ['1-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '>=70']
result = []
for s in size:
start = s[0]
end = s[1]
result.append(len(mh.search_size_between(start, end)))
result.append(len(mh.search_size_bigger_than(69)))
# x = range(result)
name = "Test_Case_Size_Distribution"
plt.ylabel("Count")
plt.xlabel("Size")
plt.title(name)
plt.bar(x_label, result)
for a, b in zip(x_label, result):
plt.text(a, b + 1, '%.0f' % b, ha='center', va='bottom')
image_path = combine_image_path('size', name)
plt.savefig(image_path)
print('save image done!')
plt.show()
def show_size_bigger_than(size=30):
results = mh.search_size_bigger_than(size)
print("count: " + str(len(results)))
headers = ['Test Case', 'Size', 'Path']
file_name = 'size_bigger_than_' + str(size)
file_path = combine_csv_path('size', file_name)
save_csv(file_path, headers, results)
table = PrettyTable(headers)
    for s in results:
        name, size, path = s[0], s[1], s[2]
        table.add_row([name, size, path])
print(table)
def show_size_between(start=30, end=50):
results = mh.search_size_between(start, end)
print("count: " + str(len(results)))
headers = ['Test Case', 'Size', 'Test Smells', 'Path']
file_name = 'size_between_' + str(start) + '&' + str(end)
file_path = combine_csv_path('size', file_name)
save_csv(file_path, headers, results)
table = PrettyTable(headers)
for r in results:
table.add_row([r[0], r[1], r[2], r[3]])
print(table)
def show_smell():
plt.figure(figsize=(10, 10))
plt.figure(1)
bar1 = plt.subplot(211)
bar2 = plt.subplot(223)
bar3 = plt.subplot(224)
def show_smell_1():
smells = len(mh.search_test_smell())
no_smells = len(mh.search_no_smell())
x_label = ["Test Cases with Smells", "Test Cases without Smells"]
size = [smells, no_smells]
bar1.set_title("Test Case Smell Distribution")
color = ["blue", "green"]
patches, l_text, p_text = bar1.pie(size, colors=color, labels=x_label, labeldistance=1.1,
autopct="%1.1f%%", shadow=False, startangle=90, pctdistance=0.6)
bar1.axis("equal")
bar1.legend()
def show_smell_2():
result = mh.smell_distribution()
x_label = []
y_value = []
for rt in result:
x_label.append(str(rt[0]))
y_value.append(rt[1])
bar2.set_ylabel("Number of Test Cases")
bar2.set_xlabel("Number of Smells")
bar2.set_title("Test Smells Distribution")
bar2.bar(x_label, y_value)
for a, b in zip(x_label, y_value):
bar2.text(a, b + 1, '%d' % b, ha='center', va='bottom')
def show_smell_type():
result = mh.smell_type()
x_label = []
y_value = []
for rt in result:
x_label.append(rt[0])
y_value.append(rt[1])
bar3.set_xlabel("Test Smell Type")
bar3.set_ylabel("Count")
bar3.set_title("Test Smells Type Distribution")
bar3.bar(x_label, y_value)
for a, b in zip(x_label, y_value):
bar3.text(a, b + 1, '%d' % b, ha='center', va='bottom')
folder = 'test_smell'
results = mh.search_test_smell()
headers = ['Test Case', 'Number of Smells', 'Path']
file_name = 'test_smell_count'
file_path = combine_csv_path(folder, file_name)
save_csv(file_path, headers, results)
table = PrettyTable(headers)
print("count: " + str(len(results)))
for r in results:
table.add_row([r[0], r[1], r[2]])
print(table)
show_smell_1()
show_smell_2()
show_smell_type()
image_path = combine_image_path(folder, 'Test_Smell_Distribution')
plt.savefig(image_path)
print('save image done!')
plt.show()
def show_smell_details(test_case="all"):
results = mh.search_smell_details(test_case)
headers = ['Test Case', 'Number of Smells', 'Smell type', 'Tip', 'Location', 'Path']
file_name = 'test_smell_details' + '__' + test_case
file_path = combine_csv_path('test_smell', file_name)
save_csv(file_path, headers, results)
table = PrettyTable(headers)
for r in results:
table.add_row([r[0], r[1], r[2], r[3], r[4], r[5]])
print(table)
def show_dependency_cover(days=3600):
# dependency_cover_T = len(mh.search_dependency_cover_T(days))
# git_diff_N = len(mh.search_git_diff_N(days))
# dependency_cover_F = len(mh.search_dependency_cover_F(days))
dependency_cover_F_count = mh.search_dependency_cover_F_count(days)
#
# def show_bar():
# x_label = ['dependency_cover_T', 'git_diff_N', 'dependency_cover_F&git_diff_Y']
# y_value = [dependency_cover_T, git_diff_N, dependency_cover_F]
# plt.xlabel("Dependency Coverage")
# plt.ylabel("Number of failed tests")
# plt.title("Dependency Coverage Distribution" + " (within " + str(days) + " days)")
# plt.bar(x_label, y_value)
# for a, b in zip(x_label, y_value):
# plt.text(a, b + 1, '%d' % b, ha='center', va='bottom')
# plt.show()
def show_F_count():
headers = ['Failed Test Case', 'NT-FDUC',
'Path', 'Latest Failed Build ID']
file_name = 'Dependency_Cover'
file_path = combine_csv_path('dependency_cover', file_name)
save_csv(file_path, headers, dependency_cover_F_count)
table = PrettyTable(headers)
for r in dependency_cover_F_count:
table.add_row([r[0], r[1], r[2], r[3]])
print(table)
show_F_count()
# show_bar()
def show_latest_dependency_cover(build_id=0):
results = mh.search_latest_failed_build(build_id)
if len(results) > 0:
data = []
for r in results:
temp = [r[0]]
if r[2] == 'F':
temp.append('Not Covered')
else:
temp.append('Covered')
temp.append(r[3])
temp.append(r[1])
temp.append(r[4])
temp.append(r[5])
data.append(temp)
headers = ['Failed Test', 'Coverage status', 'Previous State', 'Build ID', 'Build Finished Time', 'Path']
file_name = results[0][1] + '_dependency_cover'
file_path = combine_csv_path('dependency_cover', file_name)
save_csv(file_path, headers, data)
table = PrettyTable(headers)
for r in data:
table.add_row([r[0], r[1], r[2], r[3], r[4], r[5]])
print(table)
else:
print('Build passed or build failed without failed tests or no such build')
def show_build_history(days=3600):
failed_tests = mh.search_failed_times(days)
build_status = mh.search_build_status(days)
folder = 'build_history'
def show_status():
x_label = ['passed', 'failed without failed tests', 'failed with failed tests']
y_value = []
sum = 0
for r in build_status:
if r[0] == 2:
sum += r[1]
elif r[0] == 3 or r[0] == 4:
continue
else:
y_value.append(r[1])
y_value.append(sum)
plt.xlabel("Status")
plt.ylabel("Number of builds")
plt.title("Build History Status Distribution" + " (within " + str(days) + " days)")
plt.bar(x_label, y_value)
for a, b in zip(x_label, y_value):
plt.text(a, b + 1, '%d' % b, ha='center', va='bottom')
image_path = combine_image_path(folder, "Build_History_Status_Distribution" + "(within_" + str(days) + "_days)")
plt.savefig(image_path)
plt.show()
def show_failed_times():
headers = ['Failed Test Name', 'Failed Times', 'Path']
file_name = 'Test_Case_failed_times'
file_path = combine_csv_path(folder, file_name)
save_csv(file_path, headers, failed_tests)
table = PrettyTable(['Failed Test Name', 'Failed Times', 'Path'])
for r in failed_tests:
table.add_row([r[0], r[1], r[2]])
print(table)
show_failed_times()
show_status()
def show_flakiness_score_one(build_id):
flakiness_list = mh.flakiness_score_one(build_id)
headers = ['Build ID', 'Test Case', 'Score', 'NT-FDUC', 'Size', 'Number of Test Smells', 'Dependency Cover', 'Path']
table = PrettyTable(headers)
rows = []
if flakiness_list:
for f in flakiness_list:
for k, v in f.items():
temp = [v['build_id'], k, v['score'], v['failed_times'], v['size'], v['test_smells'],
v['dependency_cover'], v['path']]
table.add_row(temp)
rows.append(temp)
folder = 'flakiness_score'
file_name = 'Flakiness_Score_' + str(build_id)
file_path = combine_csv_path(folder, file_name)
save_csv(file_path, headers, rows)
print(table.get_string(sortby="Score", reversesort=True))
else:
print('Build passed or build failed without failed tests or no such build')
def show_flakiness_score(test_case):
results = mh.flakiness_score(test_case)
score_dic = {}
x_label = ['0', '0-1', '1-2', '2-3', '3-5', '5-7', '7-9', '>9']
y_value = [0, 0, 0, 0, 0, 0, 0, 0]
for r in results:
score = r[1] * 0.2 + (r[2] - 29 if r[2] > 30 else 0) * 0.05 + r[3] * 0.4 + (2 if r[4] == 'F' else 0)
if score == 0:
y_value[0] += 1
elif 0 < score <= 1:
y_value[1] += 1
elif 1 < score <= 2:
y_value[2] += 1
elif 2 < score <= 3:
y_value[3] += 1
elif 3 < score <= 5:
y_value[4] += 1
elif 5 < score <= 7:
y_value[5] += 1
elif 7 < score <= 9:
y_value[6] += 1
elif score > 9:
y_value[7] += 1
if score > 0:
score = format(score, '.2f')
score_dic[r[0]] = {'failed_times': r[1], 'size': r[2], 'test_smells': r[3], 'recent_cover': r[4],
'git_diff': r[5], 'build_id': r[6], 'path': r[7], 'score': score}
plt.xlabel("Flakiness Score")
plt.ylabel("Number of Test Cases")
plt.title("Flakiness Score Distribution")
plt.bar(x_label, y_value)
for a, b in zip(x_label, y_value):
plt.text(a, b + 1, '%d' % b, ha='center', va='bottom')
headers = ['Test Case', 'Score', 'NT-FDUC', 'Size', 'Number of Test Smells', 'Latest Dependency Cover',
'Latest Failed build_id', 'Path']
table = PrettyTable(headers)
rows = []
for key, value in score_dic.items():
temp = [key, value['score'], value['failed_times'], value['size'], value['test_smells'], value['recent_cover'],
value['build_id'], value['path']]
table.add_row(temp)
rows.append(temp)
folder = 'flakiness_score'
file_name = 'Flakiness_Score'
file_path = combine_csv_path(folder, file_name)
save_csv(file_path, headers, rows)
print(table.get_string(sortby="Score", reversesort=True))
image_path = combine_image_path(folder, "Flakiness_Score_Distribution")
plt.savefig(image_path)
plt.show()
@func_timer
def show_flakiness():
results = mh.search_flakiness()
headers = ['Build ID', 'Test Method', 'Flaky or Not', 'Detection Method', 'Traceback Coverage', 'Number of Smells',
'Flaky Frequency', 'Size', 'Path']
rows = []
table = PrettyTable(headers)
for res in results:
if res[3] == 'T':
method = 'Traceback Coverage'
else:
method = 'Multi-Factor'
if res[4] == 'T':
cover = 'Covered'
else:
cover = 'Not Covered'
if res[7] == 0:
smells = 'Unavailable'
size = 'Unavailable'
temp = [res[0], res[1], 'flaky', method, cover, smells, res[6], size, res[8]]
else:
temp = [res[0], res[1], 'flaky', method, cover, res[5], res[6], res[7], res[8]]
table.add_row(temp)
rows.append(temp)
    folder = 'flakiness'
    file_name = 'Flakiness'
    file_path = combine_csv_path(folder, file_name)
    save_csv(file_path, headers, rows)
    print(table)
    # drop rows without data (no time-format in first column)
df = df.loc[df['Heures'].str.len() == 11]
# drop daylight saving time hours (no data there)
df = df.loc[(df['Éolien terrestre'] != '*') & (df['Solaire'] != '*')]
# just display beginning of hours
df['Heures'] = df['Heures'].str[:5]
# construct full date to later use as index
df['timestamp'] = df['Dates'] + ' ' + df['Heures']
df['timestamp'] = pd.to_datetime(df['timestamp'], dayfirst=True,)
# drop autumn dst hours as they contain inconsistent data (none or copy of
# hour before)
dst_transitions_autumn = [
d.replace(hour=2)
for d in pytz.timezone('Europe/Paris')._utc_transition_times
if d.year >= 2000 and d.month == 10]
df = df.loc[~df['timestamp'].isin(dst_transitions_autumn)]
df.set_index(df['timestamp'], inplace=True)
# Transfer to UTC
df.index = df.index.tz_localize('Europe/Paris')
df.index = df.index.tz_convert(None)
colmap = {
'Éolien terrestre': {
'variable': 'wind_onshore',
'region': 'FR',
'attribute': 'generation_actual',
'source': 'RTE',
'web': url,
'unit': 'MW'
},
'Solaire': {
'variable': 'solar',
'region': 'FR',
'attribute': 'generation_actual',
'source': 'RTE',
'web': url,
'unit': 'MW'
}
}
# Drop any column not in colmap
df = df[[key for key in colmap.keys() if key in df.columns]]
# Create the MultiIndex
tuples = [tuple(colmap[col][level] for level in headers)
for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
return df
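# Hedged usage sketch (illustrative; the path, variable name and URL are
# placeholders). The read() dispatcher below calls this helper as
# read_rte(filepath, variable_name, url, headers), where `headers` lists the
# MultiIndex level names referenced in the colmap above.
#
#   HEADERS = ['variable', 'region', 'attribute', 'source', 'web', 'unit']
#   df = read_rte('original_data/RTE/wind_solar/eolien_solaire.xls',
#                 'wind_solar', 'https://www.rte-france.com/', HEADERS)
#   df.head()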
def terna_file_to_initial_dataframe(filepath):
"""
Parse the xml or read excel directly,
returning the data from the file in a simple-index dataframe.
Some files are formated as xml, some are pure excel files.
This function handles both cases.
Parameters:
----------
filepath: str
The path of the file to process
Returns:
----------
df: pandas.DataFrame
A pandas dataframe containing the data from the specified file.
"""
# First, we'll try to parse the file as if it is xml.
try:
excelHandler = ExcelHandler()
parse(filepath, excelHandler)
# Create the dataframe from the parsed data
df = pd.DataFrame(excelHandler.tables[0][2:], columns=excelHandler.tables[0][1])
# Convert the "Generation [MWh]" column to numeric
df["Generation [MWh]"] = pd.to_numeric(df["Generation [MWh]"])
except:
# In the case of an exception, treat the file as excel.
df = pd.read_excel(filepath, header=1)
return df
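# Hedged usage sketch (illustrative; the file name is a placeholder). The helper
# transparently handles both the xml-formatted and the plain-excel variants of
# the Terna files.
#
#   raw = terna_file_to_initial_dataframe('original_data/Terna/2018-01-01.xlsx')
#   raw[['Date/Hour', 'Bidding Area', 'Type', 'Generation [MWh]']].head()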
def read_terna(filepath, url, headers):
"""
Read a file from Terna into a dataframe
Parameters:
----------
filepath: str
The path of the file to read.
url:
The url of the Terna page.
headers:
Levels for the MultiIndex.
Returns:
----------
df: pandas.DataFrame
A pandas multi-index dataframe containing the data from the specified file.
"""
# Reading the file into a pandas dataframe
df = terna_file_to_initial_dataframe(filepath)
# Casting the "Date/Time" column to datetime
df["Date/Hour"] = pd.to_datetime(df["Date/Hour"])
# Setting the index to "Date/Hour"
# Renaming the bidding area names to conform to the codes from areas.csv
df["Bidding Area"] = "IT_" + df["Bidding Area"]
# The dictionary mapping energy types from the file to the variable - attribute pairs
# in the final format
solar_and_eolic_types = {
"Wind" : ("wind_onshore", "generation_actual"),
"Photovoltaic Estimated" : ("solar", "generation_forecast"),
"Photovoltaic Measured" : ("solar", "generation_actual")
}
# Keeping only the data for solar and eolic sources
df = df.loc[df["Type"].isin(solar_and_eolic_types.keys()), :]
# Reshaping the data so that each combination of a bidding area and type
# is represented as a column of its own.
# The new column names are formatted as follows: "Generation [MWh]:{area_code}:{type}"
df = df.pivot_table(index=["Date/Hour"], columns=["Bidding Area","Type"], aggfunc='first')
df.columns = df.columns.map(lambda x: '{}:{}:{}'.format(x[0], x[1], x[2]))
# Note that at this point the "Date/Hour" column has become the frame's index.
# Creating a mapping from column names to the corresponding multiindex hierarchy
area_codes = ["IT_CNOR", "IT_CSUD", "IT_NORD", "IT_SARD", "IT_SICI", "IT_SUD"]
column_map = {}
for area_code in area_codes:
for energy_type in solar_and_eolic_types:
variable, attribute = solar_and_eolic_types[energy_type]
column_name = "Generation [MWh]:{}:{}".format(area_code, energy_type)
column_map[column_name] = {
"region" : area_code,
"variable" : variable,
"attribute" : attribute,
"source" : "Terna",
"web" : url,
"unit" : "MWh"
}
# Drop any column not in the column mapping
df = df[list(column_map.keys())]
# Create the MultiIndex
tuples = [tuple(column_map[col][level] for level in headers)
for col in df.columns]
df.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
return df
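# Hedged usage sketch (illustrative; the path and URL are placeholders). The
# returned frame is indexed by "Date/Hour" and has one MultiIndex column per
# (bidding area, energy type) pair.
#
#   HEADERS = ['region', 'variable', 'attribute', 'source', 'web', 'unit']
#   df = read_terna('original_data/Terna/2018-01-01.xlsx',
#                   'https://www.terna.it/', HEADERS)
#   df.columns.names    # -> the entries of HEADERS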
def read(data_path, areas, source_name, variable_name, res_key,
headers, param_dict, start_from_user=None, end_from_user=None):
"""
For the sources specified in the sources.yml file, pass each downloaded
file to the correct read function.
Parameters
----------
source_name : str
Name of source to read files from
variable_name : str
Indicator for subset of data available together in the same files
param_dict : dict
Dictionary of further parameters, i.e. the URL of the Source to be
placed in the column-MultiIndex
res_key : str
Resolution of the source data. Must be one of ['15min', '30min', 60min']
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
data_path : str, default: 'original_data'
Base download directory in which to save all downloaded files
start_from_user : datetime.date, default None
Start of period for which to read the data
end_from_user : datetime.date, default None
End of period for which to read the data
Returns
----------
data_set: pandas.DataFrame
A DataFrame containing the combined data for variable_name
"""
data_set = pd.DataFrame()
variable_dir = os.path.join(data_path, source_name, variable_name)
logger.info('reading %s - %s', source_name, variable_name)
files_existing = sum([len(files) for r, d, files in os.walk(variable_dir)])
files_success = 0
# Check if there are folders for variable_name
if not os.path.exists(variable_dir):
logger.warning('folder not found for %s, %s',
source_name, variable_name)
return data_set
# For each file downloaded for that variable
for container in sorted(os.listdir(variable_dir)):
# Skip this file if period covered excluded by user
if start_from_user:
# start lies after file end => filecontent is too old
if start_from_user > yaml.load(container.split('_')[1]):
continue # go to next container
if end_from_user:
# end lies before file start => filecontent is too recent
if end_from_user < yaml.load(container.split('_')[0]) - timedelta(days=1):
continue # go to next container
files = os.listdir(os.path.join(variable_dir, container))
# Check if there is only one file per folder
if len(files) == 0:
logger.warning('found no file in %s %s %s',
source_name, variable_name, container)
continue
elif len(files) > 1:
logger.warning('found more than one file in %s %s %s',
source_name, variable_name, container)
continue
filepath = os.path.join(variable_dir, container, files[0])
# Check if file is not empty
if os.path.getsize(filepath) < 128:
logger.warning('%s \n file is smaller than 128 Byte. It is probably'
' empty and will thus be skipped from reading',
filepath)
else:
logger.debug('reading data:\n\t '
'Source: %s\n\t '
'Variable: %s\n\t '
'Filename: %s',
source_name, variable_name, files[0])
update_progress(files_success, files_existing)
url = param_dict['web']
if source_name == 'OPSD':
data_to_add = read_opsd(filepath, url, headers)
elif source_name == 'CEPS':
data_to_add = read_ceps(filepath, variable_name, url, headers)
elif source_name == 'ENTSO-E Transparency FTP':
data_to_add = read_entso_e_transparency(
areas, filepath, variable_name, url, headers, res_key,
**param_dict)
elif source_name == 'ENTSO-E Data Portal':
data_to_add = read_entso_e_portal(filepath, url, headers)
elif source_name == 'ENTSO-E Power Statistics':
data_to_add = read_entso_e_statistics(filepath, url, headers)
elif source_name == 'Energinet.dk':
data_to_add = read_energinet_dk(filepath, url, headers)
elif source_name == 'Elia':
data_to_add = read_elia(filepath, variable_name, url, headers)
elif source_name == 'PSE':
data_to_add = read_pse(filepath, variable_name, url, headers)
elif source_name == 'RTE':
data_to_add = read_rte(filepath, variable_name, url, headers)
elif source_name == 'Svenska Kraftnaet':
data_to_add = read_svenska_kraftnaet(
filepath, variable_name, url, headers)
elif source_name == '50Hertz':
data_to_add = read_hertz(filepath, variable_name, url, headers)
elif source_name == 'Amprion':
data_to_add = read_amprion(
filepath, variable_name, url, headers)
elif source_name == 'TenneT':
data_to_add = read_tennet(
filepath, variable_name, url, headers)
elif source_name == 'TransnetBW':
data_to_add = read_transnetbw(
filepath, variable_name, url, headers)
elif source_name == 'APG':
data_to_add = read_apg(filepath, url, headers)
elif source_name == "Terna":
data_to_add = read_terna(filepath, url, headers)
if data_set.empty:
data_set = data_to_add
else:
data_set = data_set.combine_first(data_to_add)
files_success += 1
update_progress(files_success, files_existing)
if data_set.empty:
logger.warning('returned empty DataFrame for %s, %s',
source_name, variable_name)
return data_set
# Reindex with a new index that is sure to be continous in order to later
# expose gaps in the data.
no_gaps = pd.DatetimeIndex(start=data_set.index[0],
end=data_set.index[-1],
freq=res_key)
data_set = data_set.reindex(index=no_gaps)
# Cut off the data outside of [start_from_user:end_from_user]
# In order to make sure that the respective time period is covered in both
# UTC and CE(S)T, we set the start in CE(S)T, but the end in UTC
if start_from_user:
start_from_user = (
pytz.timezone('Europe/Brussels')
.localize(datetime.combine(start_from_user, time()))
.astimezone(pytz.timezone('UTC')))
if end_from_user:
end_from_user = (
pytz.timezone('UTC')
            .localize(datetime.combine(end_from_user, time())))
        # programs / full_answers - shape [len, batch]
##################################
programs_input = programs[:-1]
programs_target = programs[1:]
full_answers_input = full_answers[:-1]
full_answers_target = full_answers[1:]
# print("programs_input.size()", programs_input.size())
# print("programs_target.size()", programs_target.size())
# print("full_answers_input.size()", full_answers_input.size())
# print("full_answers_target.size()", full_answers_target.size())
# print("programs_input", programs_input)
# print("programs_target", programs_target)
# print("full_answers_input", full_answers_input)
# print("full_answers_target", full_answers_target)
##################################
# Forward training data
##################################
output = model(
questions,
gt_scene_graphs,
programs_input,
full_answers_input
)
programs_output, short_answer_logits = output
##################################
# Evaluate on training data
##################################
with torch.no_grad():
##################################
# Calculate Fast Evaluation for each module
##################################
this_short_answer_acc1 = accuracy(short_answer_logits, short_answer_label, topk=(1,))
short_answer_acc.update(this_short_answer_acc1[0].item(), this_batch_size)
text_pad_idx = GQATorchDataset.TEXT.vocab.stoi[GQATorchDataset.TEXT.pad_token]
##################################
# Convert output probability to top1 guess
# So that we could measure accuracy
##################################
programs_output_pred = programs_output.detach().topk(
k=1, dim=-1, largest=True, sorted=True
)[1].squeeze(-1)
# full_answers_output_pred = full_answers_output.detach().topk(
# k=1, dim=-1, largest=True, sorted=True
# )[1].squeeze(-1)
this_program_acc, this_program_group_acc, this_program_non_empty_acc = program_string_exact_match_acc(
programs_output_pred, programs_target,
padding_idx=text_pad_idx,
group_accuracy_WAY_NUM=GQATorchDataset.MAX_EXECUTION_STEP)
program_acc.update(this_program_acc, this_batch_size)
program_group_acc.update(this_program_group_acc, this_batch_size // GQATorchDataset.MAX_EXECUTION_STEP)
program_non_empty_acc.update(this_program_non_empty_acc, this_batch_size)
# this_full_answers_acc = string_exact_match_acc(
# full_answers_output_pred, full_answers_target, padding_idx=text_pad_idx
# )
# full_answer_acc.update(this_full_answers_acc, this_batch_size)
##################################
# Neural Execution Engine Bitmap loss
# ground truth stored at gt_scene_graphs.y
# using torch.nn.BCELoss - torch.nn.functional.binary_cross_entropy
# should also add a special precision recall for that
##################################
# execution_bitmap_loss = criterion['execution_bitmap'](execution_bitmap, gt_scene_graphs.y)
# precision, precision_div, recall, recall_div = bitmap_precision_recall(
# execution_bitmap, gt_scene_graphs.y, threshold=0.5
# )
# bitmap_precision.update(precision, precision_div)
# bitmap_recall.update(recall, recall_div)
##################################
# Calculate each module's loss and get global loss
##################################
def text_generation_loss(loss_fn, output, target):
text_vocab_size = len(GQATorchDataset.TEXT.vocab)
output = output.contiguous().view(-1, text_vocab_size)
target = target.contiguous().view(-1)
this_text_loss = loss_fn(output, target)
return this_text_loss
program_loss = text_generation_loss(criterion['program'], programs_output, programs_target)
# full_answer_loss = text_generation_loss(
# criterion['full_answer'], full_answers_output, full_answers_target
# )
##################################
# using sigmoid loss for short answer
##################################
# num_short_answer_choices = 1842
# short_answer_label_one_hot = torch.nn.functional.one_hot(short_answer_label, num_short_answer_choices).float()
# short_answer_loss = criterion['short_answer'](short_answer_logits, short_answer_label_one_hot) # sigmoid loss
##################################
# normal softmax loss for short answer
##################################
short_answer_loss = criterion['short_answer'](short_answer_logits, short_answer_label)
# loss = program_loss + full_answer_loss + short_answer_loss # + execution_bitmap_loss
loss = program_loss + short_answer_loss
# measure accuracy and record loss
losses.update(loss.item(), this_batch_size)
##################################
# compute gradient and do SGD step
##################################
optimizer.zero_grad()
loss.backward()
optimizer.step()
##################################
# measure elapsed time
##################################
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 or i == len(train_loader) - 1:
progress.display(i)
##################################
# Give final score
##################################
progress.display(batch=len(train_loader))
"""
Input shape: [Len, Batch]
A fast GPU-based string exact match accuracy calculator
TODO: if the prediction does not stop at target's padding area.
(should rarely happen if at all)
"""
def string_exact_match_acc(predictions, target, padding_idx=1):
##################################
# Do token-level match first
# Generate a matching matrix: if equals or pad, then put 1, else 0
# Shape: [Len, Batch]
##################################
target_len = target.size(0)
# truncated
predictions = predictions[:target_len]
char_match_matrix = (predictions == target).long()
# print("char_match_matrix", char_match_matrix)
cond_match_matrix = torch.where(target == padding_idx, torch.ones_like(target), char_match_matrix)
# print("cond_match_matrix", cond_match_matrix)
del char_match_matrix
##################################
# Reduction of token-level match
# 1 means exact match, 0 means at least one token not matching
# Dim: note that the first dim is len, batch is the second dim
##################################
# ret: (values, indices)
match_reduced, _ = torch.min(input=cond_match_matrix, dim=0, keepdim=False)
# print("match_reduced", match_reduced)
this_batch_size = target.size(1)
# print("this_batch_size", this_batch_size)
# mul 100, converting to percentage
accuracy = torch.sum(match_reduced).item() / this_batch_size * 100.0
return accuracy
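# Hedged worked example (illustrative, made-up token ids, padding_idx=1):
# predictions and targets agree on every non-pad position of column 0 but
# differ in column 1, so the exact-match accuracy is 50.0.
#
#   import torch
#   target = torch.tensor([[5, 5], [6, 7], [1, 1]])      # shape [len=3, batch=2]
#   pred   = torch.tensor([[5, 5], [6, 8], [1, 1]])
#   string_exact_match_acc(pred, target, padding_idx=1)   # -> 50.0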
"""
Input shape: [Len, Batch]
A fast GPU-based string exact match accuracy calculator
TODO: if the prediction does not stop at target's padding area.
(should rarely happen if at all)
group_accuracy_WAY_NUM: only calculated as correct if the whole group is correct.
Used in program accuracy: only correct if all instructions are correct.
-1 means ignore
"""
def program_string_exact_match_acc(predictions, target, padding_idx=1, group_accuracy_WAY_NUM=-1):
##################################
# Do token-level match first
# Generate a matching matrix: if equals or pad, then put 1, else 0
# Shape: [Len, Batch]
##################################
target_len = target.size(0)
# truncated
predictions = predictions[:target_len]
char_match_matrix = (predictions == target).long()
cond_match_matrix = torch.where(target == padding_idx, torch.ones_like(target), char_match_matrix)
del char_match_matrix
##################################
# Reduction of token-level match
# 1 means exact match, 0 means at least one token not matching
# Dim: note that the first dim is len, batch is the second dim
##################################
# ret: (values, indices)
match_reduced, _ = torch.min(input=cond_match_matrix, dim=0, keepdim=False)
this_batch_size = target.size(1)
# mul 100, converting to percentage
accuracy = torch.sum(match_reduced).item() / this_batch_size * 100.0
##################################
# Calculate Batch Accuracy
##################################
group_batch_size = this_batch_size // group_accuracy_WAY_NUM
match_reduced_group_reshape = match_reduced.view(group_batch_size, group_accuracy_WAY_NUM)
# print("match_reduced_group_reshape", match_reduced_group_reshape)
# ret: (values, indices)
group_match_reduced, _ = torch.min(input=match_reduced_group_reshape, dim=1, keepdim=False)
# print("group_match_reduced", group_match_reduced)
# mul 100, converting to percentage
group_accuracy = torch.sum(group_match_reduced).item() / group_batch_size * 100.0
##################################
# Calculate Empty
# start of sentence, end of sentence, padding
# Shape: [Len=2, Batch]
##################################
# empty and counted as correct
empty_instr_flag = (target[2] == padding_idx) & match_reduced.bool()
empty_instr_flag = empty_instr_flag.long()
# print("empty_instr_flag", empty_instr_flag)
empty_count = torch.sum(empty_instr_flag).item()
# print("empty_count", empty_count)
non_empty_accuracy = (torch.sum(match_reduced).item() - empty_count) / (this_batch_size - empty_count) * 100.0
##################################
# Return
##################################
    return accuracy, group_accuracy, non_empty_accuracy
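# Hedged worked example (illustrative, made-up token ids, padding_idx=1 and
# group_accuracy_WAY_NUM=2): three of the four program slots match exactly
# (per-instruction accuracy 75.0), only the first group of two is fully correct
# (group accuracy 50.0), and two slots are empty, leaving a non-empty accuracy
# of 50.0.
#
#   import torch
#   target = torch.tensor([[2, 2, 2, 2], [3, 3, 3, 3], [1, 4, 1, 4]])
#   pred   = torch.tensor([[2, 2, 2, 2], [3, 3, 3, 9], [1, 4, 1, 4]])
#   program_string_exact_match_acc(pred, target, padding_idx=1,
#                                  group_accuracy_WAY_NUM=2)   # -> (75.0, 50.0, 50.0)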
def validate(val_loader, model, criterion, args, FAST_VALIDATE_FLAG=False, DUMP_RESULT=False):
batch_time = AverageMeter('Time', ':6.3f')
program_acc = AverageMeter('Acc@Program', ':6.2f')
program_group_acc = AverageMeter('Acc@ProgramGroup', ':4.2f')
program_non_empty_acc = AverageMeter('Acc@ProgramNonEmpty', ':4.2f')
# bitmap_precision = AverageMeter('Precision@Bitmap', ':4.2f')
# bitmap_recall = AverageMeter('Recall@Bitmap', ':4.2f')
# full_answer_acc = AverageMeter('Acc@Full', ':6.2f')
short_answer_acc = AverageMeter('Acc@Short', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[
batch_time, program_acc,
program_group_acc, program_non_empty_acc,
short_answer_acc
],
prefix='Test: '
)
# switch to evaluate mode
model.eval()
if DUMP_RESULT:
quesid2ans = {}
with torch.no_grad():
end = time.time()
for i, (data_batch) in enumerate(val_loader):
questionID, questions, gt_scene_graphs, programs, full_answers, short_answer_label, types = data_batch
questions, gt_scene_graphs, programs, full_answers, short_answer_label = [
datum.to(device=cuda, non_blocking=True) for datum in [
questions, gt_scene_graphs, programs, full_answers, short_answer_label
]
]
this_batch_size = questions.size(1)
if FAST_VALIDATE_FLAG:
raise NotImplementedError("Should not use fast validation. Only for short answer accuracy")
##################################
# Prepare training input and training target for text generation
##################################
programs_input = programs[:-1]
programs_target = programs[1:]
full_answers_input = full_answers[:-1]
full_answers_target = full_answers[1:]
##################################
# Forward evaluate data
##################################
output = model(
questions,
gt_scene_graphs,
programs_input,
full_answers_input
)
programs_output, short_answer_logits = output
##################################
# Convert output probability to top1 guess
# So that we could measure accuracy
##################################
programs_output_pred = programs_output.detach().topk(
k=1, dim=-1, largest=True, sorted=True
)[1].squeeze(-1)
# full_answers_output_pred = full_answers_output.detach().topk(
# k=1, dim=-1, largest=True, sorted=True
# )[1].squeeze(-1)
else:
programs_target = programs
full_answers_target = full_answers
##################################
# Greedy decoding-based evaluation
##################################
output = model(
questions,
gt_scene_graphs,
None,
None,
SAMPLE_FLAG=True
)
programs_output_pred, short_answer_logits = output
##################################
# Neural Execution Engine Bitmap loss
# ground truth stored at gt_scene_graphs.y
# using torch.nn.BCELoss - torch.nn.functional.binary_cross_entropy
##################################
# precision, precision_div, recall, recall_div = bitmap_precision_recall(
# execution_bitmap, gt_scene_graphs.y, threshold=0.5
# )
# bitmap_precision.update(precision, precision_div)
# bitmap_recall.update(recall, recall_div)
##################################
# Calculate Fast Evaluation for each module
##################################
this_short_answer_acc1 = accuracy(short_answer_logits.detach(), short_answer_label, topk=(1,))
short_answer_acc.update(this_short_answer_acc1[0].item(), this_batch_size)
text_pad_idx = GQATorchDataset.TEXT.vocab.stoi[GQATorchDataset.TEXT.pad_token]
this_program_acc, this_program_group_acc, this_program_non_empty_acc = program_string_exact_match_acc(
programs_output_pred, programs_target,
padding_idx=text_pad_idx,
group_accuracy_WAY_NUM=GQATorchDataset.MAX_EXECUTION_STEP
)
program_acc.update(this_program_acc, this_batch_size)
program_group_acc.update(this_program_group_acc, this_batch_size // GQATorchDataset.MAX_EXECUTION_STEP)
program_non_empty_acc.update(this_program_non_empty_acc, this_batch_size)
# this_full_answers_acc = string_exact_match_acc(
# full_answers_output_pred.detach(), full_answers_target, padding_idx=text_pad_idx
# )
# full_answer_acc.update(this_full_answers_acc, this_batch_size)
##################################
# Example Visualization from the first batch
##################################
if i == 0:
for batch_idx in range(min(this_batch_size, 128)):
##################################
# print Question and Question ID
##################################
question = questions[:, batch_idx].cpu()
question_sent, _ = GQATorchDataset.indices_to_string(question, True)
print("Question({}) QID({}):".format(batch_idx, questionID[batch_idx]), question_sent)
if utils.is_main_process():
logging.info("Question({}) QID({}): {}".format(batch_idx, questionID[batch_idx], question_sent))
##################################
# print program prediction
##################################
for instr_idx in range(GQATorchDataset.MAX_EXECUTION_STEP):
true_batch_idx = instr_idx + GQATorchDataset.MAX_EXECUTION_STEP * batch_idx
gt = programs[:, true_batch_idx].cpu()
pred = programs_output_pred[:, true_batch_idx]
pred_sent, _ = GQATorchDataset.indices_to_string(pred, True)
gt_sent, _ = GQATorchDataset.indices_to_string(gt, True)
if len(pred_sent) == 0 and len(gt_sent) == 0:
# skip if both target and prediction are empty
continue
# gt_caption
print(
"Generated Program ({}): ".format(true_batch_idx), pred_sent,
" Ground Truth Program ({}):".format(true_batch_idx), gt_sent
)
if utils.is_main_process():
# gt_caption
logging.info("Generated Program ({}): {} Ground Truth Program ({}): {}".format(
true_batch_idx, pred_sent, true_batch_idx, gt_sent
))
##################################
# print full answer prediction
##################################
# gt = full_answers[:, batch_idx].cpu()
# pred = full_answers_output_pred[:, batch_idx]
# pred_sent, _ = GQATorchDataset.indices_to_string(pred, True)
# gt_sent, _ = GQATorchDataset.indices_to_string(gt, True)
# # gt_caption
# print(
# "Generated Full Answer ({}): ".format(batch_idx), pred_sent,
# "Ground Truth Full Answer ({}):".format(batch_idx), gt_sent
# )
# if utils.is_main_process():
# # gt_caption
# logging.info("Generated Full Answer ({}): {} | |
dis_inds = np.argmin(dis, axis=1)
for attack_ind, track_id in enumerate(dets_ids):
if track_id is None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, scores_keep, dets_second, scores_second, attack_ids, attack_inds) if len(
attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
noise, attack_iter, suc = self.attack_mt_det(
imgs,
img_info,
dets,
dets_second,
outputs_index_1,
outputs_index_2,
last_info=self.ad_last_info,
outputs_ori=outputs,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
adImg = cv2.imread(os.path.join(self.args.img_dir, img_info[-1][0]))
if adImg is None:
import pdb;
pdb.set_trace()
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
imgs = (imgs + noise)
imgs[0, 0] = torch.clip(imgs[0, 0], min=-0.485 / 0.229, max=(1 - 0.485) / 0.229)
imgs[0, 1] = torch.clip(imgs[0, 1], min=-0.456 / 0.224, max=(1 - 0.456) / 0.224)
imgs[0, 2] = torch.clip(imgs[0, 2], min=-0.406 / 0.225, max=(1 - 0.406) / 0.225)
imgs = imgs.data
noise = self.recoverNoise(noise, adImg)
adImg = np.clip(adImg + noise, a_min=0, a_max=255)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
output_stracks_att = self.update(imgs, img_info, img_size, [], ids, track_id=None)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
if self.multiple_att_ids[track.track_id] <= self.FRAME_THR:
output_stracks_att_ind.append(ind)
if len(output_stracks_ori_ind) and len(output_stracks_att_ind):
ori_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_ori) if i in output_stracks_ori_ind]
att_dets = [track.curr_tlbr for i, track in enumerate(output_stracks_att) if i in output_stracks_att_ind]
ori_dets = np.stack(ori_dets).astype(np.float64)
att_dets = np.stack(att_dets).astype(np.float64)
ious = bbox_ious(ori_dets, att_dets)
row_ind, col_ind = linear_sum_assignment(-ious)
for i in range(len(row_ind)):
if ious[row_ind[i], col_ind[i]] > 0.9:
ori_id = output_stracks_ori[output_stracks_ori_ind[row_ind[i]]].track_id
att_id = output_stracks_att[output_stracks_att_ind[col_ind[i]]].track_id
self.multiple_ori2att[ori_id] = att_id
return output_stracks_ori, output_stracks_att, adImg, noise, l2_dis
def update_attack_mt(self, imgs, img_info, img_size, data_list, ids, **kwargs):
self.frame_id_ += 1
activated_starcks = []
refind_stracks = []
lost_stracks = []
removed_stracks = []
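# Keeping gradients on the input image tensor lets the attack step further below
# back-propagate the detection/tracking objective to the pixels and derive the
# adversarial perturbation `noise` that is later added to `imgs`.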
imgs.requires_grad = True
# model_2 = copy.deepcopy(self.model_2)
self.model_2.zero_grad()
outputs = self.model_2(imgs)
if self.decoder is not None:
outputs = self.decoder(outputs, dtype=outputs.type())
outputs_post, outputs_index = postprocess(outputs.detach(), self.num_classes, self.confthre, self.nmsthre)
output_results = self.convert_to_coco_format([outputs_post[0].detach()], img_info, ids)
data_list.extend(output_results)
output_results = outputs_post[0]
outputs = outputs[0]
if output_results.shape[1] == 5:
scores = output_results[:, 4]
bboxes = output_results[:, :4]
else:
output_results = output_results.detach().cpu().numpy()
scores = output_results[:, 4] * output_results[:, 5]
bboxes = output_results[:, :4] # x1y1x2y2
img_h, img_w = img_info[0], img_info[1]
scale = min(img_size[0] / float(img_h), img_size[1] / float(img_w))
bboxes /= scale
remain_inds = scores > self.args.track_thresh
inds_low = scores > 0.1
inds_high = scores < self.args.track_thresh
inds_second = np.logical_and(inds_low, inds_high)
dets_second = bboxes[inds_second]
dets = bboxes[remain_inds]
scores_keep = scores[remain_inds]
scores_second = scores[inds_second]
outputs_index_1 = outputs_index[remain_inds]
outputs_index_2 = outputs_index[inds_second]
dets_ids = [None for _ in range(len(dets) + len(dets_second))]
if len(dets) > 0:
'''Detections'''
detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for
(tlbr, s) in zip(dets, scores_keep)]
else:
detections = []
''' Add newly detected tracklets to tracked_stracks'''
unconfirmed = []
tracked_stracks = [] # type: list[STrack]
for track in self.tracked_stracks_:
if not track.is_activated:
unconfirmed.append(track)
else:
tracked_stracks.append(track)
''' Step 2: First association, with high score detection boxes'''
strack_pool = joint_stracks(tracked_stracks, self.lost_stracks_)
# Predict the current location with KF
STrack.multi_predict(strack_pool)
dists = matching.iou_distance(strack_pool, detections)
if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)
for itracked, idet in matches:
track = strack_pool[itracked]
det = detections[idet]
if track.state == TrackState.Tracked:
track.update(detections[idet], self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[idet] = track.track_id
''' Step 3: Second association, with low score detection boxes'''
# association the untrack to the low score detections
if len(dets_second) > 0:
'''Detections'''
detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for
(tlbr, s) in zip(dets_second, scores_second)]
else:
detections_second = []
r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
dists = matching.iou_distance(r_tracked_stracks, detections_second)
matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
for itracked, idet in matches:
track = r_tracked_stracks[itracked]
det = detections_second[idet]
if track.state == TrackState.Tracked:
track.update(det, self.frame_id_)
activated_starcks.append(track)
else:
track.re_activate_(det, self.frame_id_, new_id=False)
refind_stracks.append(track)
dets_ids[idet + len(dets)] = track.track_id
for it in u_track:
track = r_tracked_stracks[it]
if not track.state == TrackState.Lost:
track.mark_lost()
lost_stracks.append(track)
'''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
detections = [detections[i] for i in u_detection]
dists = matching.iou_distance(unconfirmed, detections)
if not self.args.mot20:
dists = matching.fuse_score(dists, detections)
matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
for itracked, idet in matches:
unconfirmed[itracked].update(detections[idet], self.frame_id_)
activated_starcks.append(unconfirmed[itracked])
for it in u_unconfirmed:
track = unconfirmed[it]
track.mark_removed()
removed_stracks.append(track)
""" Step 4: Init new stracks"""
for inew in u_detection:
track = detections[inew]
if track.score < self.det_thresh:
continue
track.activate_(self.kalman_filter, self.frame_id_)
activated_starcks.append(track)
""" Step 5: Update state"""
for track in self.lost_stracks_:
if self.frame_id_ - track.end_frame > self.max_time_lost:
track.mark_removed()
removed_stracks.append(track)
# print('Ramained match {} s'.format(t4-t3))
self.tracked_stracks_ = [t for t in self.tracked_stracks_ if t.state == TrackState.Tracked]
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, activated_starcks)
self.tracked_stracks_ = joint_stracks(self.tracked_stracks_, refind_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.tracked_stracks_)
self.lost_stracks_.extend(lost_stracks)
self.lost_stracks_ = sub_stracks(self.lost_stracks_, self.removed_stracks_)
self.removed_stracks_.extend(removed_stracks)
self.tracked_stracks_, self.lost_stracks_ = remove_duplicate_stracks(self.tracked_stracks_, self.lost_stracks_)
# get scores of lost tracks
dets_ = np.concatenate([dets, dets_second])
output_stracks_ori = [track for track in self.tracked_stracks_ if track.is_activated]
id_set = set([track.track_id for track in output_stracks_ori])
for i in range(len(dets_ids)):
if dets_ids[i] is not None and dets_ids[i] not in id_set:
dets_ids[i] = None
output_stracks_ori_ind = []
for ind, track in enumerate(output_stracks_ori):
if track.track_id not in self.multiple_ori_ids:
self.multiple_ori_ids[track.track_id] = 0
self.multiple_ori_ids[track.track_id] += 1
if self.multiple_ori_ids[track.track_id] <= self.FRAME_THR:
output_stracks_ori_ind.append(ind)
noise = None
attack_ids = []
target_ids = []
attack_inds = []
target_inds = []
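# Attack/target pair selection: build the detection-vs-detection IoU matrix with the
# diagonal zeroed and a centre-distance matrix with the diagonal set to inf. For each
# detection with a tracked id, the most-overlapping other detection (argmax IoU) is the
# preferred target; the pair is attacked when that IoU exceeds ATTACK_IOU_THR, or when
# the track is already in low_iou_ids and has any overlap. Tracks in low_iou_ids that
# stay at zero IoU for more than 10 frames are dropped from the set. If no IoU-based
# target qualifies, the nearest detection by centre distance (argmin) is used instead.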
if len(dets_) > 0:
ious = bbox_ious(np.ascontiguousarray(dets_[:, :4], dtype=np.float64),
np.ascontiguousarray(dets_[:, :4], dtype=np.float64))
ious[range(len(dets_)), range(len(dets_))] = 0
ious_inds = np.argmax(ious, axis=1)
dis = bbox_dis(np.ascontiguousarray(dets_[:, :4], dtype=np.float64),
np.ascontiguousarray(dets_[:, :4], dtype=np.float64))
dis[range(len(dets_)), range(len(dets_))] = np.inf
dis_inds = np.argmin(dis, axis=1)
for attack_ind, track_id in enumerate(dets_ids):
if track_id is None or self.multiple_ori_ids[track_id] <= self.FRAME_THR \
or dets_ids[ious_inds[attack_ind]] not in self.multiple_ori2att \
or track_id not in self.multiple_ori2att:
continue
if ious[attack_ind, ious_inds[attack_ind]] > self.ATTACK_IOU_THR or (
track_id in self.low_iou_ids and ious[attack_ind, ious_inds[attack_ind]] > 0
):
attack_ids.append(track_id)
target_ids.append(dets_ids[ious_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(ious_inds[attack_ind])
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', 0)
elif ious[attack_ind, ious_inds[attack_ind]] == 0 and track_id in self.low_iou_ids:
if hasattr(self, f'temp_i_{track_id}'):
self.__setattr__(f'temp_i_{track_id}', self.__getattribute__(f'temp_i_{track_id}') + 1)
else:
self.__setattr__(f'temp_i_{track_id}', 1)
if self.__getattribute__(f'temp_i_{track_id}') > 10:
self.low_iou_ids.remove(track_id)
elif dets_ids[dis_inds[attack_ind]] in self.multiple_ori2att:
attack_ids.append(track_id)
target_ids.append(dets_ids[dis_inds[attack_ind]])
attack_inds.append(attack_ind)
target_inds.append(dis_inds[attack_ind])
fit_index = self.CheckFit(dets, scores_keep, dets_second, scores_second, attack_ids, attack_inds) if len(
attack_ids) else []
if fit_index:
attack_ids = np.array(attack_ids)[fit_index]
target_ids = np.array(target_ids)[fit_index]
attack_inds = np.array(attack_inds)[fit_index]
target_inds = np.array(target_inds)[fit_index]
if self.args.rand:
noise, attack_iter, suc = self.attack_mt_random(
imgs,
img_info,
dets,
dets_second,
outputs_index_1,
outputs_index_2,
self.ad_last_info,
outputs,
attack_ids,
attack_inds,
target_ids,
target_inds
)
else:
noise, attack_iter, suc = self.attack_mt(
imgs,
img_info,
dets,
dets_second,
outputs_index_1,
outputs_index_2,
last_info=self.ad_last_info,
outputs_ori=outputs,
attack_ids=attack_ids,
attack_inds=attack_inds,
target_ids=target_ids,
target_inds=target_inds
)
self.low_iou_ids.update(set(attack_ids))
if suc:
self.attacked_ids.update(set(attack_ids))
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: SUCCESS\tl2 distance: {(noise ** 2).sum().sqrt().item()}\titeration: {attack_iter}')
else:
print(
f'attack ids: {attack_ids}\tattack frame {self.frame_id_}: FAIL\tl2 distance: {(noise ** 2).sum().sqrt().item() if noise is not None else None}\titeration: {attack_iter}')
adImg = cv2.imread(os.path.join(self.args.img_dir, img_info[-1][0]))
if adImg is None:
import pdb;
pdb.set_trace()
if noise is not None:
l2_dis = (noise ** 2).sum().sqrt().item()
imgs = (imgs + noise)
imgs[0, 0] = torch.clip(imgs[0, 0], min=-0.485 / 0.229, max=(1 - 0.485) / 0.229)
imgs[0, 1] = torch.clip(imgs[0, 1], min=-0.456 / 0.224, max=(1 - 0.456) / 0.224)
imgs[0, 2] = torch.clip(imgs[0, 2], min=-0.406 / 0.225, max=(1 - 0.406) / 0.225)
imgs = imgs.data
noise = self.recoverNoise(noise, adImg)
adImg = np.clip(adImg + noise, a_min=0, a_max=255)
noise = (noise - np.min(noise)) / (np.max(noise) - np.min(noise))
noise = (noise * 255).astype(np.uint8)
else:
l2_dis = None
output_stracks_att = self.update(imgs, img_info, img_size, [], ids, track_id=None)
output_stracks_att_ind = []
for ind, track in enumerate(output_stracks_att):
if track.track_id not in self.multiple_att_ids:
self.multiple_att_ids[track.track_id] = 0
self.multiple_att_ids[track.track_id] += 1
m.c2704 = Constraint(expr=m.x186*m.x2516 + m.x811*m.x2522 + m.x1436*m.x2528 + m.x2061*m.x2534 <= 8)
m.c2705 = Constraint(expr=m.x187*m.x2516 + m.x812*m.x2522 + m.x1437*m.x2528 + m.x2062*m.x2534 <= 8)
m.c2706 = Constraint(expr=m.x188*m.x2516 + m.x813*m.x2522 + m.x1438*m.x2528 + m.x2063*m.x2534 <= 8)
m.c2707 = Constraint(expr=m.x189*m.x2516 + m.x814*m.x2522 + m.x1439*m.x2528 + m.x2064*m.x2534 <= 8)
m.c2708 = Constraint(expr=m.x190*m.x2516 + m.x815*m.x2522 + m.x1440*m.x2528 + m.x2065*m.x2534 <= 8)
m.c2709 = Constraint(expr=m.x191*m.x2516 + m.x816*m.x2522 + m.x1441*m.x2528 + m.x2066*m.x2534 <= 8)
m.c2710 = Constraint(expr=m.x192*m.x2516 + m.x817*m.x2522 + m.x1442*m.x2528 + m.x2067*m.x2534 <= 8)
m.c2711 = Constraint(expr=m.x193*m.x2516 + m.x818*m.x2522 + m.x1443*m.x2528 + m.x2068*m.x2534 <= 8)
m.c2712 = Constraint(expr=m.x194*m.x2516 + m.x819*m.x2522 + m.x1444*m.x2528 + m.x2069*m.x2534 <= 8)
m.c2713 = Constraint(expr=m.x195*m.x2516 + m.x820*m.x2522 + m.x1445*m.x2528 + m.x2070*m.x2534 <= 8)
m.c2714 = Constraint(expr=m.x196*m.x2516 + m.x821*m.x2522 + m.x1446*m.x2528 + m.x2071*m.x2534 <= 8)
m.c2715 = Constraint(expr=m.x197*m.x2516 + m.x822*m.x2522 + m.x1447*m.x2528 + m.x2072*m.x2534 <= 8)
m.c2716 = Constraint(expr=m.x198*m.x2516 + m.x823*m.x2522 + m.x1448*m.x2528 + m.x2073*m.x2534 <= 8)
m.c2717 = Constraint(expr=m.x199*m.x2516 + m.x824*m.x2522 + m.x1449*m.x2528 + m.x2074*m.x2534 <= 8)
m.c2718 = Constraint(expr=m.x200*m.x2516 + m.x825*m.x2522 + m.x1450*m.x2528 + m.x2075*m.x2534 <= 8)
m.c2719 = Constraint(expr=m.x201*m.x2516 + m.x826*m.x2522 + m.x1451*m.x2528 + m.x2076*m.x2534 <= 8)
m.c2720 = Constraint(expr=m.x202*m.x2516 + m.x827*m.x2522 + m.x1452*m.x2528 + m.x2077*m.x2534 <= 8)
m.c2721 = Constraint(expr=m.x203*m.x2516 + m.x828*m.x2522 + m.x1453*m.x2528 + m.x2078*m.x2534 <= 8)
m.c2722 = Constraint(expr=m.x204*m.x2516 + m.x829*m.x2522 + m.x1454*m.x2528 + m.x2079*m.x2534 <= 8)
m.c2723 = Constraint(expr=m.x205*m.x2516 + m.x830*m.x2522 + m.x1455*m.x2528 + m.x2080*m.x2534 <= 8)
m.c2724 = Constraint(expr=m.x206*m.x2516 + m.x831*m.x2522 + m.x1456*m.x2528 + m.x2081*m.x2534 <= 8)
m.c2725 = Constraint(expr=m.x207*m.x2516 + m.x832*m.x2522 + m.x1457*m.x2528 + m.x2082*m.x2534 <= 8)
m.c2726 = Constraint(expr=m.x208*m.x2516 + m.x833*m.x2522 + m.x1458*m.x2528 + m.x2083*m.x2534 <= 8)
m.c2727 = Constraint(expr=m.x209*m.x2516 + m.x834*m.x2522 + m.x1459*m.x2528 + m.x2084*m.x2534 <= 8)
m.c2728 = Constraint(expr=m.x210*m.x2516 + m.x835*m.x2522 + m.x1460*m.x2528 + m.x2085*m.x2534 <= 8)
m.c2729 = Constraint(expr=m.x211*m.x2516 + m.x836*m.x2522 + m.x1461*m.x2528 + m.x2086*m.x2534 <= 8)
m.c2730 = Constraint(expr=m.x212*m.x2516 + m.x837*m.x2522 + m.x1462*m.x2528 + m.x2087*m.x2534 <= 8)
m.c2731 = Constraint(expr=m.x213*m.x2516 + m.x838*m.x2522 + m.x1463*m.x2528 + m.x2088*m.x2534 <= 8)
m.c2732 = Constraint(expr=m.x214*m.x2516 + m.x839*m.x2522 + m.x1464*m.x2528 + m.x2089*m.x2534 <= 8)
m.c2733 = Constraint(expr=m.x215*m.x2516 + m.x840*m.x2522 + m.x1465*m.x2528 + m.x2090*m.x2534 <= 8)
m.c2734 = Constraint(expr=m.x216*m.x2516 + m.x841*m.x2522 + m.x1466*m.x2528 + m.x2091*m.x2534 <= 8)
m.c2735 = Constraint(expr=m.x217*m.x2516 + m.x842*m.x2522 + m.x1467*m.x2528 + m.x2092*m.x2534 <= 8)
m.c2736 = Constraint(expr=m.x218*m.x2516 + m.x843*m.x2522 + m.x1468*m.x2528 + m.x2093*m.x2534 <= 8)
m.c2737 = Constraint(expr=m.x219*m.x2516 + m.x844*m.x2522 + m.x1469*m.x2528 + m.x2094*m.x2534 <= 8)
m.c2738 = Constraint(expr=m.x220*m.x2516 + m.x845*m.x2522 + m.x1470*m.x2528 + m.x2095*m.x2534 <= 8)
m.c2739 = Constraint(expr=m.x221*m.x2516 + m.x846*m.x2522 + m.x1471*m.x2528 + m.x2096*m.x2534 <= 8)
m.c2740 = Constraint(expr=m.x222*m.x2516 + m.x847*m.x2522 + m.x1472*m.x2528 + m.x2097*m.x2534 <= 8)
m.c2741 = Constraint(expr=m.x223*m.x2516 + m.x848*m.x2522 + m.x1473*m.x2528 + m.x2098*m.x2534 <= 8)
m.c2742 = Constraint(expr=m.x224*m.x2516 + m.x849*m.x2522 + m.x1474*m.x2528 + m.x2099*m.x2534 <= 8)
m.c2743 = Constraint(expr=m.x225*m.x2516 + m.x850*m.x2522 + m.x1475*m.x2528 + m.x2100*m.x2534 <= 8)
m.c2744 = Constraint(expr=m.x226*m.x2516 + m.x851*m.x2522 + m.x1476*m.x2528 + m.x2101*m.x2534 <= 8)
m.c2745 = Constraint(expr=m.x227*m.x2516 + m.x852*m.x2522 + m.x1477*m.x2528 + m.x2102*m.x2534 <= 8)
m.c2746 = Constraint(expr=m.x228*m.x2516 + m.x853*m.x2522 + m.x1478*m.x2528 + m.x2103*m.x2534 <= 8)
m.c2747 = Constraint(expr=m.x229*m.x2516 + m.x854*m.x2522 + m.x1479*m.x2528 + m.x2104*m.x2534 <= 8)
m.c2748 = Constraint(expr=m.x230*m.x2516 + m.x855*m.x2522 + m.x1480*m.x2528 + m.x2105*m.x2534 <= 8)
m.c2749 = Constraint(expr=m.x231*m.x2516 + m.x856*m.x2522 + m.x1481*m.x2528 + m.x2106*m.x2534 <= 8)
m.c2750 = Constraint(expr=m.x232*m.x2516 + m.x857*m.x2522 + m.x1482*m.x2528 + m.x2107*m.x2534 <= 8)
m.c2751 = Constraint(expr=m.x233*m.x2516 + m.x858*m.x2522 + m.x1483*m.x2528 + m.x2108*m.x2534 <= 8)
m.c2752 = Constraint(expr=m.x234*m.x2516 + m.x859*m.x2522 + m.x1484*m.x2528 + m.x2109*m.x2534 <= 8)
m.c2753 = Constraint(expr=m.x235*m.x2516 + m.x860*m.x2522 + m.x1485*m.x2528 + m.x2110*m.x2534 <= 8)
m.c2754 = Constraint(expr=m.x236*m.x2516 + m.x861*m.x2522 + m.x1486*m.x2528 + m.x2111*m.x2534 <= 8)
m.c2755 = Constraint(expr=m.x237*m.x2516 + m.x862*m.x2522 + m.x1487*m.x2528 + m.x2112*m.x2534 <= 8)
m.c2756 = Constraint(expr=m.x238*m.x2516 + m.x863*m.x2522 + m.x1488*m.x2528 + m.x2113*m.x2534 <= 8)
m.c2757 = Constraint(expr=m.x239*m.x2516 + m.x864*m.x2522 + m.x1489*m.x2528 + m.x2114*m.x2534 <= 8)
m.c2758 = Constraint(expr=m.x240*m.x2516 + m.x865*m.x2522 + m.x1490*m.x2528 + m.x2115*m.x2534 <= 8)
m.c2759 = Constraint(expr=m.x241*m.x2516 + m.x866*m.x2522 + m.x1491*m.x2528 + m.x2116*m.x2534 <= 8)
m.c2760 = Constraint(expr=m.x242*m.x2516 + m.x867*m.x2522 + m.x1492*m.x2528 + m.x2117*m.x2534 <= 8)
m.c2761 = Constraint(expr=m.x243*m.x2516 + m.x868*m.x2522 + m.x1493*m.x2528 + m.x2118*m.x2534 <= 8)
m.c2762 = Constraint(expr=m.x244*m.x2516 + m.x869*m.x2522 + m.x1494*m.x2528 + m.x2119*m.x2534 <= 8)
m.c2763 = Constraint(expr=m.x245*m.x2516 + m.x870*m.x2522 + m.x1495*m.x2528 + m.x2120*m.x2534 <= 8)
m.c2764 = Constraint(expr=m.x246*m.x2516 + m.x871*m.x2522 + m.x1496*m.x2528 + m.x2121*m.x2534 <= 8)
m.c2765 = Constraint(expr=m.x247*m.x2516 + m.x872*m.x2522 + m.x1497*m.x2528 + m.x2122*m.x2534 <= 8)
m.c2766 = Constraint(expr=m.x248*m.x2516 + m.x873*m.x2522 + m.x1498*m.x2528 + m.x2123*m.x2534 <= 8)
m.c2767 = Constraint(expr=m.x249*m.x2516 + m.x874*m.x2522 + m.x1499*m.x2528 + m.x2124*m.x2534 <= 8)
m.c2768 = Constraint(expr=m.x250*m.x2516 + m.x875*m.x2522 + m.x1500*m.x2528 + m.x2125*m.x2534 <= 8)
m.c2769 = Constraint(expr=m.x251*m.x2516 + m.x876*m.x2522 + m.x1501*m.x2528 + m.x2126*m.x2534 <= 8)
m.c2770 = Constraint(expr=m.x252*m.x2516 + m.x877*m.x2522 + m.x1502*m.x2528 + m.x2127*m.x2534 <= 8)
m.c2771 = Constraint(expr=m.x253*m.x2516 + m.x878*m.x2522 + m.x1503*m.x2528 + m.x2128*m.x2534 <= 8)
m.c2772 = Constraint(expr=m.x254*m.x2516 + m.x879*m.x2522 + m.x1504*m.x2528 + m.x2129*m.x2534 <= 8)
m.c2773 = Constraint(expr=m.x255*m.x2516 + m.x880*m.x2522 + m.x1505*m.x2528 + m.x2130*m.x2534 <= 8)
m.c2774 = Constraint(expr=m.x256*m.x2516 + m.x881*m.x2522 + m.x1506*m.x2528 + m.x2131*m.x2534 <= 8)
m.c2775 = Constraint(expr=m.x257*m.x2516 + m.x882*m.x2522 + m.x1507*m.x2528 + m.x2132*m.x2534 <= 8)
m.c2776 = Constraint(expr=m.x258*m.x2516 + m.x883*m.x2522 + m.x1508*m.x2528 + m.x2133*m.x2534 <= 8)
m.c2777 = Constraint(expr=m.x259*m.x2516 + m.x884*m.x2522 + m.x1509*m.x2528 + m.x2134*m.x2534 <= 8)
m.c2778 = Constraint(expr=m.x260*m.x2516 + m.x885*m.x2522 + m.x1510*m.x2528 + m.x2135*m.x2534 <= 8)
m.c2779 = Constraint(expr=m.x261*m.x2516 + m.x886*m.x2522 + m.x1511*m.x2528 + m.x2136*m.x2534 <= 8)
m.c2780 = Constraint(expr=m.x262*m.x2516 + m.x887*m.x2522 + m.x1512*m.x2528 + m.x2137*m.x2534 <= 8)
m.c2781 = Constraint(expr=m.x263*m.x2516 + m.x888*m.x2522 + m.x1513*m.x2528 + m.x2138*m.x2534 <= 8)
m.c2782 = Constraint(expr=m.x264*m.x2516 + m.x889*m.x2522 + m.x1514*m.x2528 + m.x2139*m.x2534 <= 8)
m.c2783 = Constraint(expr=m.x265*m.x2516 + m.x890*m.x2522 + m.x1515*m.x2528 + m.x2140*m.x2534 <= 8)
m.c2784 = Constraint(expr=m.x266*m.x2516 + m.x891*m.x2522 + m.x1516*m.x2528 + m.x2141*m.x2534 <= 8)
m.c2785 = Constraint(expr=m.x267*m.x2516 + m.x892*m.x2522 + m.x1517*m.x2528 + m.x2142*m.x2534 <= 8)
m.c2786 = Constraint(expr=m.x268*m.x2516 + m.x893*m.x2522 + m.x1518*m.x2528 + m.x2143*m.x2534 <= 8)
m.c2787 = Constraint(expr=m.x269*m.x2516 + m.x894*m.x2522 + m.x1519*m.x2528 + m.x2144*m.x2534 <= 8)
m.c2788 = Constraint(expr=m.x270*m.x2516 + m.x895*m.x2522 + m.x1520*m.x2528 + m.x2145*m.x2534 <= 8)
m.c2789 = Constraint(expr=m.x271*m.x2516 + m.x896*m.x2522 + m.x1521*m.x2528 + m.x2146*m.x2534 <= 8)
m.c2790 = Constraint(expr=m.x272*m.x2516 + m.x897*m.x2522 + m.x1522*m.x2528 + m.x2147*m.x2534 <= 8)
m.c2791 = Constraint(expr=m.x273*m.x2516 + m.x898*m.x2522 + m.x1523*m.x2528 + m.x2148*m.x2534 <= 8)
m.c2792 = Constraint(expr=m.x274*m.x2516 + m.x899*m.x2522 + m.x1524*m.x2528 + m.x2149*m.x2534 <= 8)
m.c2793 = Constraint(expr=m.x275*m.x2516 + m.x900*m.x2522 + m.x1525*m.x2528 + m.x2150*m.x2534 <= 8)
m.c2794 = Constraint(expr=m.x276*m.x2516 + m.x901*m.x2522 + m.x1526*m.x2528 + m.x2151*m.x2534 <= 8)
m.c2795 = Constraint(expr=m.x277*m.x2516 + m.x902*m.x2522 + m.x1527*m.x2528 + m.x2152*m.x2534 <= 8)
m.c2796 = Constraint(expr=m.x278*m.x2516 + m.x903*m.x2522 + m.x1528*m.x2528 + m.x2153*m.x2534 <= 8)
m.c2797 = Constraint(expr=m.x279*m.x2516 + m.x904*m.x2522 + m.x1529*m.x2528 + m.x2154*m.x2534 <= 8)
m.c2798 = Constraint(expr=m.x280*m.x2516 + m.x905*m.x2522 + m.x1530*m.x2528 + m.x2155*m.x2534 <= 8)
m.c2799 = Constraint(expr=m.x281*m.x2516 + m.x906*m.x2522 + m.x1531*m.x2528 + m.x2156*m.x2534 <= 8)
m.c2800 = Constraint(expr=m.x282*m.x2516 + m.x907*m.x2522 + m.x1532*m.x2528 + m.x2157*m.x2534 <= 8)
m.c2801 = Constraint(expr=m.x283*m.x2516 + m.x908*m.x2522 + m.x1533*m.x2528 + m.x2158*m.x2534 <= 8)
m.c2802 = Constraint(expr=m.x284*m.x2516 + m.x909*m.x2522 + m.x1534*m.x2528 + m.x2159*m.x2534 <= 8)
m.c2803 = Constraint(expr=m.x285*m.x2516 + m.x910*m.x2522 + m.x1535*m.x2528 + m.x2160*m.x2534 <= 8)
m.c2804 = Constraint(expr=m.x286*m.x2516 + m.x911*m.x2522 + m.x1536*m.x2528 + m.x2161*m.x2534 <= 8)
m.c2805 = Constraint(expr=m.x287*m.x2516 + m.x912*m.x2522 + m.x1537*m.x2528 + m.x2162*m.x2534 <= 8)
m.c2806 = Constraint(expr=m.x288*m.x2516 + m.x913*m.x2522 + m.x1538*m.x2528 + m.x2163*m.x2534 <= 8)
m.c2807 = Constraint(expr=m.x289*m.x2516 + m.x914*m.x2522 + m.x1539*m.x2528 + m.x2164*m.x2534 <= 8)
m.c2808 = Constraint(expr=m.x290*m.x2516 + m.x915*m.x2522 + m.x1540*m.x2528 + m.x2165*m.x2534 <= 8)
m.c2809 = Constraint(expr=m.x291*m.x2516 + m.x916*m.x2522 + m.x1541*m.x2528 + m.x2166*m.x2534 <= 8)
m.c2810 = Constraint(expr=m.x292*m.x2516 + m.x917*m.x2522 + m.x1542*m.x2528 + m.x2167*m.x2534 <= 8)
m.c2811 = Constraint(expr=m.x293*m.x2516 + m.x918*m.x2522 + m.x1543*m.x2528 + m.x2168*m.x2534 <= 8)
m.c2812 = Constraint(expr=m.x294*m.x2516 + m.x919*m.x2522 + m.x1544*m.x2528 + m.x2169*m.x2534 <= 8)
m.c2813 = Constraint(expr=m.x295*m.x2516 + m.x920*m.x2522 + m.x1545*m.x2528 + m.x2170*m.x2534 <= 8)
m.c2814 = Constraint(expr=m.x296*m.x2516 + m.x921*m.x2522 + m.x1546*m.x2528 + m.x2171*m.x2534 <= 8)
m.c2815 = Constraint(expr=m.x297*m.x2516 + m.x922*m.x2522 + m.x1547*m.x2528 + m.x2172*m.x2534 <= 8)
m.c2816 = Constraint(expr=m.x298*m.x2516 + m.x923*m.x2522 + m.x1548*m.x2528 + m.x2173*m.x2534 <= 8)
m.c2817 = Constraint(expr=m.x299*m.x2516 + m.x924*m.x2522 + m.x1549*m.x2528 + m.x2174*m.x2534 <= 8)
m.c2818 = Constraint(expr=m.x300*m.x2516 + m.x925*m.x2522 + m.x1550*m.x2528 + m.x2175*m.x2534 <= 8)
m.c2819 = Constraint(expr=m.x301*m.x2516 + m.x926*m.x2522 + m.x1551*m.x2528 + m.x2176*m.x2534 <= 8)
m.c2820 = Constraint(expr=m.x302*m.x2516 + m.x927*m.x2522 + m.x1552*m.x2528 + m.x2177*m.x2534 <= 8)
m.c2821 = Constraint(expr=m.x303*m.x2516 + m.x928*m.x2522 + m.x1553*m.x2528 + m.x2178*m.x2534 <= 8)
m.c2822 = Constraint(expr=m.x304*m.x2516 + m.x929*m.x2522 + m.x1554*m.x2528 + m.x2179*m.x2534 <= 8)
m.c2823 = Constraint(expr=m.x305*m.x2516 + m.x930*m.x2522 + m.x1555*m.x2528 + m.x2180*m.x2534 <= 8)
m.c2824 = Constraint(expr=m.x306*m.x2516 + m.x931*m.x2522 + m.x1556*m.x2528 + m.x2181*m.x2534 <= 8)
m.c2825 = Constraint(expr=m.x307*m.x2516 + m.x932*m.x2522 + m.x1557*m.x2528 + m.x2182*m.x2534 <= 8)
m.c2826 = Constraint(expr=m.x308*m.x2516 + m.x933*m.x2522 + m.x1558*m.x2528 + m.x2183*m.x2534 <= 8)
m.c2827 = Constraint(expr=m.x309*m.x2516 + m.x934*m.x2522 + m.x1559*m.x2528 + m.x2184*m.x2534 <= 8)
m.c2828 = Constraint(expr=m.x310*m.x2516 + m.x935*m.x2522 + m.x1560*m.x2528 + m.x2185*m.x2534 <= 8)
m.c2829 = Constraint(expr=m.x311*m.x2516 + m.x936*m.x2522 + m.x1561*m.x2528 + m.x2186*m.x2534 <= 8)
m.c2830 = Constraint(expr=m.x312*m.x2516 + m.x937*m.x2522 + m.x1562*m.x2528 + m.x2187*m.x2534 <= 8)
m.c2831 = Constraint(expr=m.x313*m.x2516 + m.x938*m.x2522 + m.x1563*m.x2528 + m.x2188*m.x2534 <= 8)
m.c2832 = Constraint(expr=m.x314*m.x2516 + m.x939*m.x2522 + m.x1564*m.x2528 + m.x2189*m.x2534 <= 8)
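# Illustrative sketch (not part of the generated model): the constraints above follow a
# fixed arithmetic pattern, where c(2704+k) pairs x(186+k), x(811+k), x(1436+k), x(2061+k)
# with x2516/x2522/x2528/x2534 under the same bound of 8, so an equivalent family could be
# built with a ConstraintList. The helper below shows the idea; the name pattern_caps is an
# assumption, it relies on the attribute-named Vars defined earlier in the model, and it is
# not invoked anywhere.
def _add_pattern_constraints_sketch(model, n_rows=129):
    from pyomo.environ import ConstraintList
    model.pattern_caps = ConstraintList()
    for k in range(n_rows):  # mirrors c2704 .. c2832
        model.pattern_caps.add(
            getattr(model, f"x{186 + k}") * model.x2516
            + getattr(model, f"x{811 + k}") * model.x2522
            + getattr(model, f"x{1436 + k}") * model.x2528
            + getattr(model, f"x{2061 + k}") * model.x2534
            <= 8
        )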
import re
import sys
from io import StringIO
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.decomposition import NMF, MiniBatchNMF
from sklearn.decomposition import non_negative_factorization
from sklearn.decomposition import _nmf as nmf # For testing internals
from scipy.sparse import csc_matrix
import pytest
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.extmath import squared_norm
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_convergence_warning(Estimator, solver):
convergence_warning = (
"Maximum number of iterations 1 reached. Increase it to improve convergence."
)
A = np.ones((2, 2))
with pytest.warns(ConvergenceWarning, match=convergence_warning):
Estimator(max_iter=1, **solver).fit(A)
def test_initialize_nn_output():
# Test that initialization does not return negative values
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
for init in ("random", "nndsvd", "nndsvda", "nndsvdar"):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert not ((W < 0).any() or (H < 0).any())
@pytest.mark.filterwarnings(
r"ignore:The multiplicative update \('mu'\) solver cannot update zeros present in"
r" the initialization"
)
def test_parameter_checking():
A = np.ones((2, 2))
name = "spam"
with ignore_warnings(category=FutureWarning):
# TODO remove in 1.2
msg = "Invalid regularization parameter: got 'spam' instead of one of"
with pytest.raises(ValueError, match=msg):
NMF(regularization=name).fit(A)
msg = "Invalid beta_loss parameter: solver 'cd' does not handle beta_loss = 1.0"
with pytest.raises(ValueError, match=msg):
NMF(solver="cd", beta_loss=1.0).fit(A)
msg = "Negative values in data passed to"
with pytest.raises(ValueError, match=msg):
NMF().fit(-A)
clf = NMF(2, tol=0.1).fit(A)
with pytest.raises(ValueError, match=msg):
clf.transform(-A)
with pytest.raises(ValueError, match=msg):
nmf._initialize_nmf(-A, 2, "nndsvd")
for init in ["nndsvd", "nndsvda", "nndsvdar"]:
msg = re.escape(
"init = '{}' can only be used when "
"n_components <= min(n_samples, n_features)".format(init)
)
with pytest.raises(ValueError, match=msg):
NMF(3, init=init).fit(A)
with pytest.raises(ValueError, match=msg):
MiniBatchNMF(3, init=init).fit(A)
with pytest.raises(ValueError, match=msg):
nmf._initialize_nmf(A, 3, init)
@pytest.mark.parametrize(
"param, match",
[
({"n_components": 0}, "Number of components must be a positive integer"),
({"max_iter": -1}, "Maximum number of iterations must be a positive integer"),
({"tol": -1}, "Tolerance for stopping criteria must be positive"),
({"init": "wrong"}, "Invalid init parameter"),
({"beta_loss": "wrong"}, "Invalid beta_loss parameter"),
],
)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_nmf_common_wrong_params(Estimator, param, match):
# Check that appropriate errors are raised for invalid values of parameters common
# to NMF and MiniBatchNMF.
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
Estimator(**param).fit(A)
@pytest.mark.parametrize(
"param, match",
[
({"solver": "wrong"}, "Invalid solver parameter"),
],
)
def test_nmf_wrong_params(param, match):
# Check that appropriate errors are raised for invalid values specific to NMF
# parameters
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
NMF(**param).fit(A)
@pytest.mark.parametrize(
"param, match",
[
({"batch_size": 0}, "batch_size must be a positive integer"),
],
)
def test_minibatch_nmf_wrong_params(param, match):
# Check that appropriate errors are raised for invalid values specific to
# MiniBatchNMF parameters
A = np.ones((2, 2))
with pytest.raises(ValueError, match=match):
MiniBatchNMF(**param).fit(A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init="nndsvd")
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert error <= sdev
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
rng = np.random.mtrand.RandomState(42)
data = np.abs(rng.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init="nndsvd")
Wa, Ha = nmf._initialize_nmf(data, 10, init="nndsvda")
War, Har = nmf._initialize_nmf(data, 10, init="nndsvdar", random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("init", (None, "nndsvd", "nndsvda", "nndsvdar", "random"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_fit_nn_output(Estimator, solver, init, alpha_W, alpha_H):
# Test that the decomposition does not contain negative values
A = np.c_[5.0 - np.arange(1, 6), 5.0 + np.arange(1, 6)]
model = Estimator(
n_components=2,
init=init,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=0,
**solver,
)
transf = model.fit_transform(A)
assert not ((model.components_ < 0).any() or (transf < 0).any())
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_fit_close(Estimator, solver):
rng = np.random.mtrand.RandomState(42)
# Test that the fit is not too far away
pnmf = Estimator(
5,
init="nndsvdar",
random_state=0,
max_iter=600,
**solver,
)
X = np.abs(rng.randn(6, 5))
assert pnmf.fit(X).reconstruction_err_ < 0.1
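# Minimal usage sketch (not one of the library's tests): fit an NMF model on a small
# non-negative matrix and inspect the factor matrices and the stored reconstruction
# error, mirroring what test_nmf_fit_close checks above.
def _nmf_usage_sketch():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(6, 5))
    model = NMF(n_components=2, init="nndsvda", max_iter=500, random_state=0)
    W = model.fit_transform(X)   # shape (6, 2)
    H = model.components_        # shape (2, 5)
    approx = W @ H               # low-rank approximation of X
    return approx.shape, model.reconstruction_err_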
def test_nmf_true_reconstruction():
# Test that the fit is not too far away from an exact solution
# (by construction)
n_samples = 15
n_features = 10
n_components = 5
beta_loss = 1
batch_size = 3
max_iter = 1000
rng = np.random.mtrand.RandomState(42)
W_true = np.zeros([n_samples, n_components])
W_array = np.abs(rng.randn(n_samples))
for j in range(n_components):
W_true[j % n_samples, j] = W_array[j % n_samples]
H_true = np.zeros([n_components, n_features])
H_array = np.abs(rng.randn(n_components))
for j in range(n_features):
H_true[j % n_components, j] = H_array[j % n_components]
X = np.dot(W_true, H_true)
model = NMF(
n_components=n_components,
solver="mu",
beta_loss=beta_loss,
max_iter=max_iter,
random_state=0,
)
transf = model.fit_transform(X)
X_calc = np.dot(transf, model.components_)
assert model.reconstruction_err_ < 0.1
assert_allclose(X, X_calc)
mbmodel = MiniBatchNMF(
n_components=n_components,
beta_loss=beta_loss,
batch_size=batch_size,
random_state=0,
max_iter=max_iter,
)
transf = mbmodel.fit_transform(X)
X_calc = np.dot(transf, mbmodel.components_)
assert mbmodel.reconstruction_err_ < 0.1
assert_allclose(X, X_calc, atol=1)
@pytest.mark.parametrize("solver", ["cd", "mu"])
def test_nmf_transform(solver):
# Test that fit_transform is equivalent to fit.transform for NMF
# Test that NMF.transform returns close values
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
m = NMF(
solver=solver,
n_components=3,
init="random",
random_state=0,
tol=1e-6,
)
ft = m.fit_transform(A)
t = m.transform(A)
assert_allclose(ft, t, atol=1e-1)
def test_minibatch_nmf_transform():
# Test that fit_transform is equivalent to fit.transform for MiniBatchNMF
# Only guaranteed with fresh restarts
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(6, 5))
m = MiniBatchNMF(
n_components=3,
random_state=0,
tol=1e-3,
fresh_restarts=True,
)
ft = m.fit_transform(A)
t = m.transform(A)
assert_allclose(ft, t)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_transform_custom_init(Estimator, solver):
# Smoke test that checks if NMF.transform works with custom initialization
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 5))
n_components = 4
avg = np.sqrt(A.mean() / n_components)
H_init = np.abs(avg * random_state.randn(n_components, 5))
W_init = np.abs(avg * random_state.randn(6, n_components))
m = Estimator(
n_components=n_components, init="custom", random_state=0, tol=1e-3, **solver
)
m.fit_transform(A, W=W_init, H=H_init)
m.transform(A)
@pytest.mark.parametrize("solver", ("cd", "mu"))
def test_nmf_inverse_transform(solver):
# Test that NMF.inverse_transform returns close values
random_state = np.random.RandomState(0)
A = np.abs(random_state.randn(6, 4))
m = NMF(
solver=solver,
n_components=4,
init="random",
random_state=0,
max_iter=1000,
)
ft = m.fit_transform(A)
A_new = m.inverse_transform(ft)
assert_array_almost_equal(A, A_new, decimal=2)
def test_mbnmf_inverse_transform():
# Test that MiniBatchNMF.transform followed by MiniBatchNMF.inverse_transform
# is close to the identity
rng = np.random.RandomState(0)
A = np.abs(rng.randn(6, 4))
nmf = MiniBatchNMF(
random_state=rng,
max_iter=500,
init="nndsvdar",
fresh_restarts=True,
)
ft = nmf.fit_transform(A)
A_new = nmf.inverse_transform(ft)
assert_allclose(A, A_new, rtol=1e-3, atol=1e-2)
@pytest.mark.parametrize("Estimator", [NMF, MiniBatchNMF])
def test_n_components_greater_n_features(Estimator):
# Smoke test for the case of more components than features.
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(30, 10))
Estimator(n_components=15, random_state=0, tol=1e-2).fit(A)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_nmf_sparse_input(Estimator, solver, alpha_W, alpha_H):
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
est1 = Estimator(
n_components=5,
init="random",
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=0,
tol=0,
max_iter=100,
**solver,
)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_allclose(W1, W2)
assert_allclose(H1, H2)
@pytest.mark.parametrize(
["Estimator", "solver"],
[[NMF, {"solver": "cd"}], [NMF, {"solver": "mu"}], [MiniBatchNMF, {}]],
)
def test_nmf_sparse_transform(Estimator, solver):
# Test that transform works on sparse data. Issue #2124
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(3, 2))
A[1, 1] = 0
A = csc_matrix(A)
model = Estimator(random_state=0, n_components=2, max_iter=400, **solver)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_allclose(A_fit_tr, A_tr, atol=1e-1)
@pytest.mark.parametrize("init", ["random", "nndsvd"])
@pytest.mark.parametrize("solver", ("cd", "mu"))
@pytest.mark.parametrize("alpha_W", (0.0, 1.0))
@pytest.mark.parametrize("alpha_H", (0.0, 1.0, "same"))
def test_non_negative_factorization_consistency(init, solver, alpha_W, alpha_H):
# Test that the function is called in the same way, either directly
# or through the NMF class
max_iter = 500
rng = np.random.mtrand.RandomState(42)
A = np.abs(rng.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
W_nmf, H, _ = non_negative_factorization(
A,
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
W_nmf_2, H, _ = non_negative_factorization(
A,
H=H,
update_H=False,
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
model_class = NMF(
init=init,
solver=solver,
max_iter=max_iter,
alpha_W=alpha_W,
alpha_H=alpha_H,
random_state=1,
tol=1e-2,
)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_allclose(W_nmf, W_cls)
assert_allclose(W_nmf_2, W_cls_2)
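# Sketch of the fixed-H pattern exercised above (illustrative, not a test): once H has
# been learned, non_negative_factorization can be called with update_H=False to solve
# only for W against the fixed components, which is essentially what NMF.transform does.
def _fixed_H_projection_sketch():
    rng = np.random.RandomState(0)
    X = np.abs(rng.randn(10, 8))
    W, H, _ = non_negative_factorization(X, n_components=3, init="random", random_state=0)
    X_new = np.abs(rng.randn(4, 8))
    W_new, _, _ = non_negative_factorization(
        X_new, H=H, update_H=False, n_components=3, init="random", random_state=0
    )
    return W_new.shape  # (4, 3)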
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
# Test parameter checking in the public function
nnmf = non_negative_factorization
msg = re.escape(
"Number of components must be a positive integer; got (n_components=1.5)"
)
with pytest.raises(ValueError, match=msg):
nnmf(A, A, A, 1.5, init="random")
from .vefm_271 import mesh, vertex, edge, face
from math import pi, acos, sin, cos, atan, tan, fabs, sqrt
def check_contains(cl, name, print_value=False):
dir_class = dir(cl)
for el in dir_class:
if el.startswith("_"):
pass
else:
if print_value:
tmp = getattr(cl, el)
print(name, " contains ==>", el, " value = ", tmp)
else:
print(name, " contains ==>", el)
print("\ncheck_contains finished\n\n")
class geodesic(mesh):
def __init__(self):
mesh.__init__(self)
self.PKHG_parameters = None
self.panels = []
self.vertsdone = []
self.skeleton = [] # List of verts in the full skeleton edges.
self.vertskeleton = [] # config needs this member
self.edgeskeleton = [] # config needs this member
self.sphericalverts = []
self.a45 = pi * 0.25
self.a90 = pi * 0.5
self.a180 = pi
self.a270 = pi * 1.5
self.a360 = pi * 2
# define members here
# setparams needs:
self.frequency = None
self.eccentricity = None
self.squish = None
self.radius = None
self.square = None
self.squarez = None
self.cart = None
self.shape = None
self.baselevel = None
self.faceshape = None
self.dualflag = None
self.rotxy = None
self.rotz = None
self.klass = None
self.sform = None
self.super = None
self.odd = None
# config needs
self.panelpoints = None
self.paneledges = None
self.reversepanel = None
self.edgelength = None
self.vertsdone = None
self.panels = []
def setparameters(self, params):
parameters = self.PKHG_parameters = params
self.frequency = parameters[0] # How many subdivisions - up to 20.
self.eccentricity = parameters[1] # Elliptical if >1.0.
self.squish = parameters[2] # Flattened if < 1.0.
self.radius = parameters[3] # Exactly what it says.
self.square = parameters[4] # Controls amount of superellipse in X/Y plane.
self.squarez = parameters[5] # Controls amount of superellipse in Z dimension.
self.cart = parameters[6] # Cuts out sphericalisation step.
self.shape = parameters[7] # Full sphere, dome, flatbase.
self.baselevel = parameters[8] # Where the base is cut on a flatbase dome.
self.faceshape = parameters[9] # Triangular, hexagonal, tri-hex.
self.dualflag = parameters[10]
self.rotxy = parameters[11]
self.rotz = parameters[12]
self.klass = parameters[13]
self.sform = parameters[14]
self.super = 0 # Toggles superellipse.
if self.square != 2.0 or self.squarez != 2.0:
self.super = 1
self.odd = 0 # Is the frequency odd. It matters for dome building.
if self.frequency % 2 != 0:
self.odd = 1
def makegeodesic(self):
self.vertedgefacedata() # PKHG only a pass 13okt11
self.config() # Generate all the configuration information.
if self.klass:
self.class2()
if self.faceshape == 1:
self.hexify() # Hexagonal faces
elif self.faceshape == 2:
self.starify() # Hex and Triangle faces
if self.dualflag:
self.dual()
if not self.cart:
self.sphericalize() # Convert x,y,z positions into spherical u,v.
self.sphere2cartesian() # Convert spherical uv back into cartesian x,y,z for final shape.
for i in range(len(self.verts)):
self.verts[i].index = i
for edg in self.edges:
edg.findvect()
def vertedgefacedata(self):
pass
def config(self):
for i in range(len(self.vertskeleton)):
self.vertskeleton[i].index = i
for edges in self.edgeskeleton:
s = skeletonrow(self.frequency, edges, 0, self) # self a geodesic
self.skeleton.append(s)
for i in range(len(self.verts)):
self.verts[i].index = i
for i in range(len(self.panelpoints)):
a = self.vertsdone[self.panelpoints[i][0]][1]
b = self.vertsdone[self.panelpoints[i][1]][1]
c = self.vertsdone[self.panelpoints[i][2]][1]
panpoints = [self.verts[a],
self.verts[b],
self.verts[c]]
panedges = [self.skeleton[self.paneledges[i][0]],
self.skeleton[self.paneledges[i][1]],
self.skeleton[self.paneledges[i][2]]]
reverseflag = 0
for flag in self.reversepanel:
if flag == i:
reverseflag = 1
p = panel(panpoints, panedges, reverseflag, self)
def sphericalize(self):
if self.shape == 2:
self.cutbasecomp()
for vert in self.verts:
x = vert.vector.x
y = vert.vector.y
z = vert.vector.z
u = self.usphericalise(x, y, z)
v = self.vsphericalise(x, y, z)
self.sphericalverts.append([u, v])
def sphere2cartesian(self):
for i in range(len(self.verts)):
if self.cart:
x = self.verts[i].vector.x * self.radius * self.eccentricity
y = self.verts[i].vector.y * self.radius
z = self.verts[i].vector.z * self.radius * self.squish
else:
u = self.sphericalverts[i][0]
v = self.sphericalverts[i][1]
if self.squish != 1.0 or self.eccentricity > 1.0:
scalez = 1 / self.squish
v = self.ellipsecomp(scalez, v)
u = self.ellipsecomp(self.eccentricity, u)
if self.super:
r1 = self.superell(self.square, u, self.rotxy)
r2 = self.superell(self.squarez, v, self.rotz)
else:
r1 = 1.0
r2 = 1.0
if self.sform[12]:
r1 = r1 * self.superform(self.sform[0], self.sform[1],
self.sform[2], self.sform[3],
self.sform[14] + u, self.sform[4],
self.sform[5], self.sform[16] * v)
if self.sform[13]:
r2 = r2 * self.superform(self.sform[6], self.sform[7],
self.sform[8], self.sform[9],
self.sform[15] + v, self.sform[10],
self.sform[11], self.sform[17] * v)
x, y, z = self.cartesian(u, v, r1, r2)
self.verts[i] = vertex((x, y, z))
def usphericalise(self, x, y, z):
if y == 0.0:
if x > 0:
theta = 0.0
else:
theta = self.a180
elif x == 0.0:
if y > 0:
theta = self.a90
else:
theta = self.a270
else:
theta = atan(y / x)
if x < 0.0 and y < 0.0:
theta = theta + self.a180
elif x < 0.0 and y > 0.0:
theta = theta + self.a180
u = theta
return u
def vsphericalise(self, x, y, z):
if z == 0.0:
phi = self.a90
else:
rho = sqrt(x ** 2 + y ** 2 + z ** 2)
phi = acos(z / rho)
v = phi
return v
def ellipsecomp(self, efactor, theta):
if theta == self.a90:
result = self.a90
elif theta == self.a270:
result = self.a270
else:
result = atan(tan(theta) / efactor**0.5)
if result >= 0.0:
x = result
y = self.a180 + result
if fabs(x - theta) <= fabs(y - theta):
result = x
else:
result = y
else:
x = self.a180 + result
y = result
if fabs(x - theta) <= fabs(y - theta):
result = x
else:
result = y
return result
def cutbasecomp(self):
pass
def cartesian(self, u, v, r1, r2):
x = r1 * cos(u) * r2 * sin(v) * self.radius * self.eccentricity
y = r1 * sin(u) * r2 * sin(v) * self.radius
z = r2 * cos(v) * self.radius * self.squish
return x, y, z
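# Standalone sketch (not used by the classes here) of the spherical-to-Cartesian mapping
# implemented in geodesic.cartesian above, with the superellipse/superformula factors r1
# and r2 left at 1.0 and eccentricity/squish left at their neutral values. It relies on
# the module-level math imports (sin, cos).
def _cartesian_sketch(u, v, radius=1.0):
    x = cos(u) * sin(v) * radius
    y = sin(u) * sin(v) * radius
    z = cos(v) * radius
    return x, y, z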
class edgerow:
def __init__(self, count, anchor, leftindex, rightindex, stepvector, endflag, parentgeo):
self.points = []
self.edges = []
# Make a row of evenly spaced points.
for i in range(count + 1):
if i == 0:
self.points.append(leftindex)
elif i == count and not endflag:
self.points.append(rightindex)
else: # PKHG Vectors added!
newpoint = anchor + (stepvector * i)
vertcount = len(parentgeo.verts)
self.points.append(vertcount)
newpoint.index = vertcount
parentgeo.verts.append(newpoint)
for i in range(count):
a = parentgeo.verts[self.points[i]]
b = parentgeo.verts[self.points[i + 1]]
line = edge(a, b)
self.edges.append(len(parentgeo.edges))
parentgeo.edges.append(line)
class skeletonrow:
def __init__(self, count, skeletonedge, shortflag, parentgeo):
self.points = []
self.edges = []
self.vect = skeletonedge.vect
self.step = skeletonedge.vect / float(count)
# Make a row of evenly spaced points.
for i in range(count + 1):
vert1 = skeletonedge.a
vert2 = skeletonedge.b
if i == 0:
if parentgeo.vertsdone[vert1.index][0]:
self.points.append(parentgeo.vertsdone[vert1.index][1])
else:
newpoint = vertex(vert1.vector)
vertcount = len(parentgeo.verts)
self.points.append(vertcount)
newpoint.index = vertcount
parentgeo.vertsdone[vert1.index] = [1, vertcount]
parentgeo.verts.append(newpoint)
elif i == count:
if parentgeo.vertsdone[vert2.index][0]:
self.points.append(parentgeo.vertsdone[vert2.index][1])
else:
newpoint = vertex(vert2.vector)
vertcount = len(parentgeo.verts)
self.points.append(vertcount)
newpoint.index = vertcount
parentgeo.vertsdone[vert2.index] = [1, vertcount]
parentgeo.verts.append(newpoint)
else:
newpoint = vertex(vert1.vector + (self.step * i)) # must be a vertex!
vertcount = len(parentgeo.verts)
self.points.append(vertcount)
newpoint.index = vertcount
parentgeo.verts.append(newpoint)
for i in range(count):
a = parentgeo.verts[self.points[i]]
b = parentgeo.verts[self.points[i + 1]]
line = edge(a, b)
self.edges.append(len(parentgeo.edges))
parentgeo.edges.append(line)
class facefill:
def __init__(self, upper, lower, reverseflag, parentgeo, finish):
for i in range(finish):
a, b, c = upper.points[i], lower.points[i + 1], lower.points[i]
if reverseflag:
upface = face([parentgeo.verts[a], parentgeo.verts[c], parentgeo.verts[b]])
else:
upface = face([parentgeo.verts[a], parentgeo.verts[b], parentgeo.verts[c]])
parentgeo.faces.append(upface)
if i == finish - 1:
pass
else:
d = upper.points[i + 1]
if reverseflag:
downface = face([parentgeo.verts[b], parentgeo.verts[d], parentgeo.verts[a]])
else:
downface = face([parentgeo.verts[b], parentgeo.verts[a], parentgeo.verts[d]])
line = edge(parentgeo.verts[a], parentgeo.verts[b])
line2 = edge(parentgeo.verts[d], parentgeo.verts[b])
parentgeo.faces.append(downface)
parentgeo.edges.append(line)
parentgeo.edges.append(line2)
class panel:
def __init__(self, points, edges, reverseflag, parentgeo):
self.cardinal = points[0]
self.leftv = points[1]
self.rightv = points[2]
self.leftedge = edges[0]
self.rightedge = edges[1]
self.baseedge = edges[2]
self.rows = []
self.orient(parentgeo, edges)
self.createrows(parentgeo)
self.createfaces(parentgeo, reverseflag)
def orient(self, parentgeo, edges):
if self.leftedge.points[0] != self.cardinal.index:
self.leftedge.points.reverse()
self.leftedge.vect.negative()
if self.rightedge.points[0] != self.cardinal.index:
self.rightedge.points.reverse()
self.rightedge.vect.negative()
if self.baseedge.points[0] != self.leftv.index:
self.baseedge.points.reverse()
self.baseedge.vect.negative()
def createrows(self, parentgeo):
for i in range(len(self.leftedge.points)):
if i == parentgeo.frequency:
newrow = self.baseedge
else:
newrow = edgerow(i, parentgeo.verts[self.leftedge.points[i]], self.leftedge.points[i],
self.rightedge.points[i], self.baseedge.step, 0, parentgeo)
self.rows.append(newrow)
def createfaces(self, parentgeo, reverseflag):
for i in range(len(self.leftedge.points) - 1):
facefill(self.rows[i], self.rows[i + 1], reverseflag, parentgeo, len(self.rows[i].points))
# for point on top? YES!
class tetrahedron(geodesic, mesh):
def __init__(self, parameter):
geodesic.__init__(self)
geodesic.setparameters(self, parameter)
_cond2, _cond3, _cond4)):
return True
return False
def extra_space_exists(str1: str, str2: str) -> bool:
"""
Return True if a space shouldn't exist between two items
"""
ls1, ls2 = len(str1), len(str2)
if _extra_space_exists(str1, str2, ls1, ls2):
return True
# 36010G20 KT
_vrb: bool = str1.startswith('VRB')
_d35 = str1[3:5].isdigit()
_d05 = str1[:5].isdigit()
_cond1 = (_d05 or (_vrb and _d35))
conds = (
str2 == 'KT' and str1[-1].isdigit() and _cond1,
# 36010K T
str2 == 'T' and ls1 >= 6 and _cond1 and str1[-1] == 'K',
# OVC022 CB
str2 in CLOUD_TRANSLATIONS and str2 not in CLOUD_LIST and ls1 >= 3 and str1[:3] in CLOUD_LIST,
# FM 122400
str1 in ['FM', 'TL'] and (str2.isdigit() or (str2.endswith('Z') and str2[:-1].isdigit())),
# TX 20/10
str1 in ['TX', 'TN'] and str2.find('/') != -1
)
if any(conds):
return True
return False
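# Illustrative check (not part of the parser): the joined-token cases documented in the
# comments above, such as a wind group split before its 'KT' suffix or an 'FM' token
# split from its timestamp, are the situations extra_space_exists is meant to flag.
# The helper is a sketch and is not called anywhere.
def _extra_space_examples():
    return (
        extra_space_exists('36010G20', 'KT'),  # gusting wind split from its unit
        extra_space_exists('FM', '122400'),    # FM split from the change-group time
    )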
# noinspection SpellCheckingInspection
ITEM_REMV = ['AUTO', 'COR', 'NSC', 'NCD', '$', 'KT', 'M', '.', 'RTD', 'SPECI', 'METAR', 'CORR']
ITEM_REPL = {'CALM': '00000KT'}
VIS_PERMUTATIONS = [''.join(p) for p in permutations('P6SM')]
VIS_PERMUTATIONS.remove('6MPS')
def sanitize_report_list(wxdata: typing.List[str], # noqa pylint: disable=too-many-branches,too-many-locals
remove_clr_and_skc: bool = True
) -> typing.Tuple[typing.List[str], typing.List[str], str]:
"""
Sanitize wxData
We can remove and identify "one-off" elements and fix other issues before parsing a line
We also return the runway visibility and wind shear since they are very easy to recognize
and their location in the report is non-standard
"""
shear = ''
runway_vis = []
for i, item in reversed(list(enumerate(wxdata))):
ilen = len(item)
_i5d = item[:5].isdigit()
_i3d = item[1:3].isdigit()
_ivrb = item.startswith('VRB')
try:
_i5kt = item[5] in ['K', 'T']
except IndexError:
_i5kt = False
try:
_i8kt = item[8] in ['K', 'T']
except IndexError:
_i8kt = False
cond1 = (ilen == 6 and _i5kt and (_i5d or _ivrb))
cond2 = (ilen == 9 and _i8kt and item[5] == 'G' and (_i5d or _ivrb))
# Remove elements containing only '/'
# noinspection SpellCheckingInspection
if is_unknown(item):
wxdata.pop(i)
# Identify Runway Visibility
elif ilen > 4 and item[0] == 'R' and (item[3] == '/' or item[4] == '/') and _i3d:
runway_vis.append(wxdata.pop(i))
# Remove RE from wx codes, REVCTS -> VCTS
elif ilen in [4, 6] and item.startswith('RE'):
wxdata[i] = item[2:]
# Fix a slew of easily identifiable conditions where a space does not belong
elif i and extra_space_exists(wxdata[i - 1], item):
wxdata[i - 1] += wxdata.pop(i)
# Remove spurious elements
elif item in ITEM_REMV:
wxdata.pop(i)
# Remove 'Sky Clear' from METAR but not TAF
elif remove_clr_and_skc and item in ['CLR', 'SKC']:
wxdata.pop(i)
# Replace certain items
elif item in ITEM_REPL:
wxdata[i] = ITEM_REPL[item]
# Remove amend signifier from start of report ('CCA', 'CCB',etc)
elif ilen == 3 and item.startswith('CC') and item[2].isalpha():
wxdata.pop(i)
# Identify Wind Shear
elif ilen > 6 and item.startswith('WS') and item[5] == '/':
shear = wxdata.pop(i).replace('KT', '')
# Fix inconsistent 'P6SM' Ex: TP6SM or 6PSM -> P6SM
elif ilen > 3 and item[-4:] in VIS_PERMUTATIONS:
wxdata[i] = 'P6SM'
# Fix wind T
elif cond1 or cond2:
wxdata[i] = item[:-1] + 'KT'
# Fix joined TX-TN
elif ilen > 16 and len(item.split('/')) == 3:
if item.startswith('TX') and 'TN' not in item:
tn_index = item.find('TN')
wxdata.insert(i + 1, item[:tn_index])
wxdata[i] = item[tn_index:]
elif item.startswith('TN') and item.find('TX') != -1:
tx_index = item.find('TX')
wxdata.insert(i + 1, item[:tx_index])
wxdata[i] = item[tx_index:]
return wxdata, runway_vis, shear
# pylint: disable=too-many-branches
def get_altimeter(wxdata: typing.List[str], units: Units, version: str = 'NA' # noqa
) -> typing.Tuple[typing.List[str], typing.Optional[Number]]:
"""
Returns the report list and the removed altimeter item
Version is 'NA' (North American / default) or 'IN' (International)
"""
if not wxdata:
return wxdata, None
altimeter = ''
target: str = wxdata[-1]
if version == 'NA':
# Version target
if target[0] == 'A':
altimeter = wxdata.pop()[1:]
# Other version but prefer normal if available
elif target[0] == 'Q':
            if len(wxdata) >= 2 and wxdata[-2][0] == 'A':
wxdata.pop()
altimeter = wxdata.pop()[1:]
else:
units.altimeter = 'hPa'
altimeter = wxdata.pop()[1:].lstrip('.')
# Else grab the digits
elif len(target) == 4 and target.isdigit():
altimeter = wxdata.pop()
elif version == 'IN':
# Version target
if target[0] == 'Q':
altimeter = wxdata.pop()[1:].lstrip('.')
if '/' in altimeter:
altimeter = altimeter[:altimeter.find('/')]
# Other version but prefer normal if available
elif target[0] == 'A':
if len(wxdata) >= 2 and wxdata[-2][0] == 'Q':
wxdata.pop()
altimeter = wxdata.pop()[1:]
else:
units.altimeter = 'inHg'
altimeter = wxdata.pop()[1:]
# Some stations report both, but we only need one
if wxdata and (wxdata[-1][0] == 'A' or wxdata[-1][0] == 'Q'):
wxdata.pop()
# convert to Number
if not altimeter:
return wxdata, None
if units.altimeter == 'inHg' and '.' not in altimeter:
value = altimeter[:2] + '.' + altimeter[2:]
else:
value = altimeter
if altimeter == 'M' * len(altimeter):
return wxdata, None
while value and not value[0].isdigit():
value = value[1:]
if value.endswith('INS'):
value = value[:-3]
if altimeter.endswith('INS'):
altimeter = altimeter[:-3]
return wxdata, make_number(value, altimeter)
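# Worked example (sketch, assuming a Units object whose altimeter defaults to 'inHg'):
# for a North American report ending in 'A2992', the leading 'A' is stripped, leaving '2992',
# and the decimal point is re-inserted, so make_number receives '29.92' (i.e. 29.92 inHg).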
def get_taf_alt_ice_turb(wxdata: typing.List[str]
) -> typing.Tuple[typing.List[str], str, typing.List[str], typing.List[str]]:
"""
Returns the report list and removed: Altimeter string, Icing list, Turbulence list
"""
altimeter = ''
icing, turbulence = [], []
for i, item in reversed(list(enumerate(wxdata))):
if len(item) > 6 and item.startswith('QNH') and item[3:7].isdigit():
altimeter = wxdata.pop(i)[3:7]
elif item.isdigit():
if item[0] == '6':
icing.append(wxdata.pop(i))
elif item[0] == '5':
turbulence.append(wxdata.pop(i))
return wxdata, altimeter, icing, turbulence
def is_possible_temp(temp: str) -> bool:
"""
Returns True if all characters are digits or 'M' (for minus)
"""
for char in temp:
if not (char.isdigit() or char == 'M'):
return False
return True
def get_temp_and_dew(wxdata: typing.List[str]
) -> typing.Tuple[typing.List[str], typing.Optional[Number], typing.Optional[Number]]:
"""
Returns the report list and removed temperature and dewpoint strings
"""
for i, item in reversed(list(enumerate(wxdata))):
if '/' in item:
# ///07
if item[0] == '/':
item = '/' + item.lstrip('/')
# 07///
elif item[-1] == '/':
item = item.rstrip('/') + '/'
tempdew = item.split('/')
if len(tempdew) != 2:
continue
valid = True
for j, temp in enumerate(tempdew):
if temp in ['MM', 'XX']:
tempdew[j] = ''
elif not is_possible_temp(temp):
valid = False
break
if valid:
wxdata.pop(i)
return wxdata, make_number(tempdew[0]), make_number(tempdew[1])
return wxdata, None, None
def get_station_and_time(wxdata: typing.List[str]) -> typing.Tuple[typing.List[str], str, str]:
"""
Returns the report list and removed station ident and time strings
"""
station = wxdata.pop(0)
    qtime = wxdata[0] if wxdata else ''
if wxdata and qtime.endswith('Z') and qtime[:-1].isdigit():
rtime = wxdata.pop(0)
elif wxdata and len(qtime) == 6 and qtime.isdigit():
rtime = wxdata.pop(0) + 'Z'
else:
rtime = ''
return wxdata, station, rtime
# pylint: disable=too-many-boolean-expressions
def get_wind(wxdata: typing.List[str], units: Units # noqa pylint: disable=too-many-locals
) -> typing.Tuple[typing.List[str],
typing.Optional[Number],
typing.Optional[Number],
typing.Optional[Number],
typing.List[typing.Optional[Number]]]:
"""
Returns the report list and removed:
Direction string, speed string, gust string, variable direction list
"""
direction, speed, gust = '', '', ''
variable: typing.List[typing.Optional[Number]] = []
if wxdata:
item = copy(wxdata[0])
for rep in ['(E)']:
item = item.replace(rep, '')
item = item.replace('O', '0')
# 09010KT, 09010G15KT
_cond1 = any((item.endswith('KT'), item.endswith('KTS'), item.endswith('MPS'), item.endswith('KMH')))
_cond2 = bool(len(item) == 5 or (len(item) >= 8 and item.find('G') != -1) and item.find('/') == -1)
_cond3 = (_cond2 and (item[:5].isdigit() or (item.startswith('VRB') and item[3:5].isdigit())))
if _cond1 or _cond3:
# In order of frequency
if item.endswith('KT'):
item = item.replace('KT', '')
elif item.endswith('KTS'):
item = item.replace('KTS', '')
elif item.endswith('MPS'):
units.wind_speed = 'm/s'
item = item.replace('MPS', '')
elif item.endswith('KMH'):
units.wind_speed = 'km/h'
item = item.replace('KMH', '')
direction = item[:3]
if 'G' in item:
g_index = item.find('G')
gust = item[g_index + 1:]
speed = item[3:g_index]
else:
speed = item[3:]
wxdata.pop(0)
# Separated Gust
if wxdata and 1 < len(wxdata[0]) < 4 and wxdata[0][0] == 'G' and wxdata[0][1:].isdigit():
gust = wxdata.pop(0)[1:]
# Variable Wind Direction
try:
_wxlen7 = len(wxdata[0]) == 7
except IndexError:
_wxlen7 = False
try:
_wxd03d = wxdata[0][:3].isdigit()
except IndexError:
_wxd03d = False
if wxdata and _wxlen7 and _wxd03d and wxdata[0][3] == 'V' and wxdata[0][4:].isdigit():
variable = [make_number(i, speak=i) for i in wxdata.pop(0).split('V')]
# Convert to Number
direction = CARDINAL_DIRECTIONS.get(direction, direction)
_resulting_direction = make_number(direction, speak=direction)
_resulting_speed = make_number(speed)
_resulting_gust = make_number(gust)
return wxdata, _resulting_direction, _resulting_speed, _resulting_gust, variable
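# Worked example (traced from the logic above, not from the original docs): for a leading
# token '09010G15KT', the 'KT' suffix is stripped, direction becomes '090', speed '10'
# (the characters between the direction and the 'G'), and gust '15'.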
def get_visibility(wxdata: typing.List[str], units: Units) -> typing.Tuple[typing.List[str], typing.Optional[Number]]:
"""
Returns the report list and removed visibility string
"""
visibility = ''
if wxdata:
item = copy(wxdata[0])
        # Vis
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# <NAME>, <NAME> and <NAME>.
# ETE: a python Environment for Tree Exploration. BMC
# Bioinformatics 2010, 11:24. doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1056"
#START_LICENSE###########################################################
#
# Copyright (C) 2009 by <NAME>. All rights reserved.
# email: <EMAIL>
#
# This file is part of the Environment for Tree Exploration program (ETE).
# http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
# #END_LICENSE#############################################################
import os
import cPickle
import random
import copy
from collections import deque
import itertools
from ete2.parser.newick import read_newick, write_newick
# the following imports are necessary to set fixed styles and faces
try:
from ete2.treeview.main import NodeStyle, _FaceAreas, FaceContainer, FACE_POSITIONS
from ete2.treeview.faces import Face
except ImportError:
TREEVIEW = False
else:
TREEVIEW = True
__all__ = ["Tree", "TreeNode"]
DEFAULT_COMPACT = False
DEFAULT_SHOWINTERNAL = False
DEFAULT_DIST = 1.0
DEFAULT_SUPPORT = 1.0
DEFAULT_NAME = "NoName"
class TreeError(Exception):
"""
A problem occurred during a TreeNode operation
"""
def __init__(self, value=''):
self.value = value
def __str__(self):
return repr(self.value)
class TreeNode(object):
"""
TreeNode (Tree) class is used to store a tree structure. A tree
consists of a collection of TreeNode instances connected in a
hierarchical way. Trees can be loaded from the New Hampshire Newick
format (newick).
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument 0 format: subnewick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:returns: a tree node object which represents the base of the tree.
** Examples: **
::
t1 = Tree() # creates an empty tree
t2 = Tree('(A:1,(B:1,(C:1,D:1):0.5):0.5);')
t3 = Tree('/home/user/myNewickFile.txt')
"""
def _get_dist(self):
return self._dist
def _set_dist(self, value):
try:
self._dist = float(value)
except ValueError:
raise
def _get_support(self):
return self._support
def _set_support(self, value):
try:
self._support = float(value)
except ValueError:
raise
def _get_up(self):
return self._up
def _set_up(self, value):
if type(value) == type(self) or value is None:
self._up = value
else:
raise ValueError("bad node_up type")
def _get_children(self):
return self._children
def _set_children(self, value):
if type(value) == list and \
len(set([type(n)==type(self) for n in value]))<2:
self._children = value
else:
raise ValueError("bad children type")
def _get_style(self):
if self._img_style is None:
self._set_style(None)
return self._img_style
def _set_style(self, value):
self.set_style(value)
#: Branch length distance to parent node. Default = 0.0
img_style = property(fget=_get_style, fset=_set_style)
#: Branch length distance to parent node. Default = 0.0
dist = property(fget=_get_dist, fset=_set_dist)
#: Branch support for current node
support = property(fget=_get_support, fset=_set_support)
#: Pointer to parent node
up = property(fget=_get_up, fset=_set_up)
#: A list of children nodes
children = property(fget=_get_children, fset=_set_children)
def _set_face_areas(self, value):
if isinstance(value, _FaceAreas):
self._faces = value
else:
raise ValueError("[%s] is not a valid FaceAreas instance" %type(value))
def _get_face_areas(self):
if not hasattr(self, "_faces"):
self._faces = _FaceAreas()
return self._faces
faces = property(fget=_get_face_areas, \
fset=_set_face_areas)
def __init__(self, newick=None, format=0, dist=None, support=None,
name=None):
self._children = []
self._up = None
self._dist = DEFAULT_DIST
self._support = DEFAULT_SUPPORT
self._img_style = None
self.features = set([])
# Add basic features
self.features.update(["dist", "support", "name"])
if dist is not None:
self.dist = dist
if support is not None:
self.support = support
self.name = name if name is not None else DEFAULT_NAME
# Initialize tree
if newick is not None:
read_newick(newick, root_node = self, format=format)
def __nonzero__(self):
return True
def __repr__(self):
return "Tree node '%s' (%s)" %(self.name, hex(self.__hash__()))
def __and__(self, value):
""" This allows to execute tree&'A' to obtain the descendant node
whose name is A"""
value=str(value)
try:
first_match = self.iter_search_nodes(name=value).next()
return first_match
except StopIteration:
raise ValueError, "Node not found"
def __add__(self, value):
""" This allows to sum two trees."""
        # Should I make the sum with two copies of the original trees?
if type(value) == self.__class__:
new_root = self.__class__()
new_root.add_child(self)
new_root.add_child(value)
return new_root
else:
raise ValueError, "Invalid node type"
def __str__(self):
""" Print tree in newick format. """
return self.get_ascii(compact=DEFAULT_COMPACT, \
show_internal=DEFAULT_SHOWINTERNAL)
def __contains__(self, item):
""" Check if item belongs to this node. The 'item' argument must
be a node instance or its associated name."""
if isinstance(item, self.__class__):
return item in set(self.get_descendants())
elif type(item)==str:
return item in set([n.name for n in self.traverse()])
def __len__(self):
"""Node len returns number of children."""
return len(self.get_leaves())
def __iter__(self):
""" Iterator over leaf nodes"""
return self.iter_leaves()
def add_feature(self, pr_name, pr_value):
"""
Add or update a node's feature.
"""
setattr(self, pr_name, pr_value)
self.features.add(pr_name)
def add_features(self, **features):
"""
Add or update several features. """
for fname, fvalue in features.iteritems():
setattr(self, fname, fvalue)
self.features.add(fname)
def del_feature(self, pr_name):
"""
Permanently deletes a node's feature.
"""
if hasattr(self, pr_name):
delattr(self, pr_name)
self.features.remove(pr_name)
# Topology management
def add_child(self, child=None, name=None, dist=None, support=None):
"""
        Adds a new child to this node. If child node is not supplied
as an argument, a new node instance will be created.
:argument None child: the node instance to be added as a child.
:argument None name: the name that will be given to the child.
:argument None dist: the distance from the node to the child.
        :argument None support: the support value of the child partition.
:returns: The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child
def remove_child(self, child):
"""
Removes a child from this node (parent and child
        nodes still exist but are no longer connected).
"""
try:
self.children.remove(child)
except ValueError, e:
raise TreeError, e
else:
child.up = None
return child
def add_sister(self, sister=None, name=None, dist=None):
"""
Adds a sister to this node. If sister node is not supplied
as an argument, a new TreeNode instance will be created and
returned.
"""
if self.up == None:
raise TreeError("A parent node is required to add a sister")
else:
return self.up.add_child(child=sister, name=name, dist=dist)
def remove_sister(self, sister=None):
"""
Removes a node's sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
        :argument sister: A sister node instance to be removed.
######################################################################
# Author: Dr. <NAME> <NAME>, <NAME>
# Username: heggens alfarozavalae, jamalie
#
# Assignment: A08: UPC Barcodes
#
# Purpose: Determine how to do some basic operations on lists
#
######################################################################
# Acknowledgements:
#
# None: Original work
# licensed under a Creative Commons
# Attribution-Noncommercial-Share Alike 3.0 United States License.
####################################################################################
import turtle # importing the turtle library
def is_valid_input(barcode):
"""
This function verifies if the barcode is 12 digits and if they are all positive numbers.
:param barcode: parameter that takes the user's input to check if it is a valid 12 digit or not
:return: Fruitful. a True or False Boolean value.
"""
if len(barcode) == 12 and barcode.isnumeric(): # checks the user's input to see if it is a valid 12 digit barcode
return True # true when the barcode is 12 digits
return False # returns false when it is not 12 digits input
def is_valid_modulo(barcode):
"""
    :param barcode: takes the user's input and applies the modulo-10 check digit method to its odd and even positions.
    :return: checkdigit (the value that should match the last digit of the barcode)
"""
oddnumbers = [] # creating new list
for i in range(0,len(barcode),2): # creating for loop to go through the elements in the barcode starting from the first one (odd) and skipping every other one
oddnumbers.append(barcode[i]) # appending into the oddnumbers list each of the elements retrieved in the for loop
oddnumber_sum = sum(map(int,oddnumbers)) # adding all the elements in the list created and using map to make them integers
oddbythree = int(oddnumber_sum) * 3 # multiplying the oddnumber_sum by three as one of the steps in module check character
evennumbers = [] # creates new empty list for even numbers
for i in range(1,len(barcode),2): # for loop to start in the first even element of the barcode and skipping every other one
evennumbers.append(barcode[i]) # appending the retrieved even numbers into the empty list
    evennumbers = evennumbers[:-1] # taking out the last even number (the check digit)
evennumber_sum = sum(map(int,evennumbers)) # adding all the even numbers after changing them into integers.
final = oddbythree + evennumber_sum # adding the result from odd numbers and even numbers to get to the final step
    final = final % 10 # remainder of the running total modulo 10
    if final != 0:  # 'is not' compares identity, not value; use != to test the remainder
checkdigit = 10 - final # subtracting 10 from the final one when the final is not zero
else:
checkdigit = final # if there's no remainder in modulus of final % 10 the final value stays the same
return checkdigit # returning the checkdigit value
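# Worked example of the modulo-10 check: for the UPC '036000291452' the odd-position digits
# (0,6,0,2,1,5) sum to 14 and give 42 after multiplying by 3; the even-position digits without
# the final check digit (3,0,0,9,4) sum to 16; 42 + 16 = 58, 58 % 10 = 8, and 10 - 8 = 2,
# which matches the last digit of the barcode.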
def translate(barcode):
"""
This function will translate the barcode into binary numbers so that we can draw the turtle by using the turtle module
:param barcode: taking the barcode from the user's input
:return: Fruitful. returns leftl and rights values of the lists lefside and rightside
"""
leftside = ['0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011'] # creating a list with all the elements from the left side table.
rightside = ['1110010','1100110','1101100','1000010','1011100','1001110','1010000','1000100','1001000','1110100'] # # creating a list with all the elements from the right side table.
barcode = list(barcode) # making the barcode a list
leftl = [] # creating an empty list to go through the first 6 elements of barcode
for i in barcode[0:6]: # for loop to run in the first 6 elements
lf = leftside[int(i)] # getting the first six elements of the list
leftl.append(lf) # appending the first 6 elements into the leftl variable
rights = [] # creating an empty list to go through the remainder 6 elements of barcode
for i in barcode[6:12]: # for loop to run in the remainder 6 elements
rs = rightside[int(i)] # getting the first six elements of the list
rights.append(rs) # appending the first 6 elements into the leftl variable
return (leftl, rights) # returning both leftl and rights to use them in main for drawing
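# Example (sketch): for the barcode '036000291452' the first digit 0 maps to the left-side
# pattern '0001101' and the last digit 2 maps to the right-side pattern '1101100', so
# translate returns two lists of six 7-bit strings each.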
def drawing_blackline(t):
"""
:param t: turtle object that will draw the black lines in the barcode
:return: None. Void
"""
t.color("black") # setting the color of the turtle to be black
t.begin_fill() # beginning to fill with the turtle
for i in range(2): # for loop to run twice
t.forward(2) # turtle t moves forward by 2
t.left(90) # turtle t turns 90 degrees left to go up
t.forward(200) # turtle t goes forward 200 up
t.left(90) # turtle t turns 90 degrees left again
t.end_fill() # finishing the filling of t
t.forward(2) # moving to the right by 2 without leaving a trace
def drawing_blackline_long(t):
"""
:param t: turtle object that will draw the black lines in the barcode for guard and center
:return: None. Void
"""
t.color("black") # setting the color of the turtle to be black
t.begin_fill()
for i in range(2): # for loop to run twice
t.forward(2) # turtle t moves forward by 2
t.left(90) # turtle t turns 90 degrees left to go up
t.forward(248) # turtle t goes forward 248 up
t.left(90) # turtle t turns 90 degrees left again
t.end_fill() # finishing the filling of t
t.forward(2) # moving to the right by 2 without leaving a trace
def drawing_white_line(t):
"""
:param t: turtle object t to draw the while lines.
:return: none. Void function .
"""
t.color("white") # setting the color of the turtle to be black
t.begin_fill() # beginning to fill with the turtle
for i in range(2): # for loop to run twice
t.forward(2) # turtle t moves forward by 2
t.left(90) # turtle t turns 90 degrees left to go up
t.forward(200) # turtle t goes forward 200 up
t.left(90) # turtle t turns 90 degrees left again
t.end_fill() # finishing the filling of t
t.forward(2) # moving to the right by 2 without leaving a trace
def drawing_white_line_long(t):
"""
:param t: turtle object t to draw the while lines for guard and center
:return: none. Void function .
"""
t.color("white") # setting the color of the turtle to be black
t.begin_fill()
for i in range(2): # for loop to run twice
t.forward(2) # moving to the right by 2
t.left(90) # turtle t turns 90 degrees left to go up
t.forward(248) # turtle t goes forward 248 up
t.left(90)
t.end_fill() # finishing the filling of t
t.forward(2) # moving to the right by 2 without leaving a trace
def main():
"""
:return: main function where the user is asked for a barcode and list is created to run the other functions that check characters in barcode
"""
input_code = input("Enter a 12 digit code [0-9]: ") # asking user for input of barcode
while not is_valid_input(input_code): # while loop to check if it is valid
input_code = input("Invalid number. Enter a 12 digit code [0-9]: ") # asking user to input a valid barcode again
list(input_code) # making the barcode a list
# TODO turtle draw code
t = turtle.Turtle() # creating the turtle
t.hideturtle() # hiding turtle to move its position
wn = turtle.Screen() # creating the turtle screen
t.speed(0) # setting the speed of the turtle
t.penup() # putting the pen up to start moving
t.setpos(-250, -100) # setting the left side position
left, right = translate(input_code) # calling the two return variables from the translate function
if is_valid_modulo(input_code) != int(input_code[11]): # if function run the module check character in the barcode
t.write("Wrong barcode.", move=False, align="left", font=("Arial", 15, "normal")) # writing the text when the barcode doesnt exist
else:
guard_left = ["1", "0", "1"] # creating list for left guard
for i in guard_left: # loop for left guard
if i == "0": # if function for drawing white lines when i is 0
drawing_white_line_long(t)
            else:
                drawing_blackline_long(t)  # draw a long black guard line when i is 1
"""
Toolbox for simulating compositional data from ScRNA-seq
This toolbox provides data generation and modelling solutions for compositional data with different specifications.
This data might e.g. come from scRNA-seq experiments.
For scenarios 1-4, we first generate composition parameters (b_true, w_true) and a covariance matrix (x) from some input specifications.
We then build a concentration vector for each sample (row of x) that sums up to 1. From there, we can calculate each row of the cell count matrix (y) via a multinomial distribution
:authors: <NAME>
"""
import numpy as np
import anndata as ad
import pandas as pd
from scipy.special import softmax
def generate_normal_uncorrelated(N, D, K, n_total, noise_std_true=1):
"""
Scenario 1: Normally distributed, independent covariates
Parameters
----------
N -- int
Number of samples
D -- int
Number of covariates
K -- int
Number of cell types
n_total -- list
Number of individual cells per sample
noise_std_true -- float
noise level. 0: No noise
Returns
-------
data
Anndata object
"""
# Generate random composition parameters
b_true = np.random.normal(0, 1, size=K).astype(np.float32) # bias (alpha)
w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32) # weights (beta)
# Generate random covariate matrix
x = np.random.normal(0, 1, size=(N, D)).astype(np.float32)
noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
# Generate y
y = np.zeros([N, K], dtype=np.float32)
for i in range(N):
# Concentration should sum to 1 for each sample
concentration = softmax(x[i, :].T@w_true + b_true + noise[i, :]).astype(np.float32)
y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
x_names = ["x_" + str(n) for n in range(x.shape[1])]
x_df = pd.DataFrame(x, columns=x_names)
data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
return data
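# Minimal usage sketch (parameter values are assumptions, not from the original source):
#     data = generate_normal_uncorrelated(N=10, D=2, K=5, n_total=[1000] * 10)
#     data.X        # (10, 5) cell-count matrix, one multinomial draw per sample
#     data.obs      # covariate DataFrame with columns x_0, x_1
#     data.uns      # {'b_true': ..., 'w_true': ...} ground-truth parameters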
def generate_normal_correlated(N, D, K, n_total, noise_std_true, covariate_mean=None, covariate_var=None):
"""
Scenario 2: Correlated covariates
Parameters
----------
N -- int
Number of samples
D -- int
Number of covariates
K -- int
Number of cell types
n_total -- list
Number of individual cells per sample
noise_std_true -- float
noise level. 0: No noise
covariate_mean -- numpy array [D]
Mean of each covariate
covariate_var -- numpy array [DxD]
Covariance matrix for covariates
Returns
-------
data
Anndata object
"""
if covariate_mean is None:
covariate_mean = np.zeros(shape=D)
# Generate randomized covariate covariance matrix if none is specified
if covariate_var is None:
# Covariates drawn from MvNormal(0, Cov), Cov_ij = p ^|i-j| , p=0.4
# Tibshirani for correlated covariates: Tibshirani (1996)
p = 0.4
covariate_var = np.zeros((D, D))
for i in range(D):
for j in range(D):
covariate_var[i, j] = p**np.abs(i-j)
# Generate random composition parameters
b_true = np.random.normal(0, 1, size=K).astype(np.float32) # bias (alpha)
w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32) # weights (beta)
# Generate random covariate matrix
x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
# Generate y
y = np.zeros([N, K], dtype=np.float32)
for i in range(N):
# Concentration should sum to 1 for each sample
concentration = softmax(x[i, :].T @ w_true + b_true + noise[i, :]).astype(np.float32)
y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
x_names = ["x_" + str(n) for n in range(x.shape[1])]
x_df = pd.DataFrame(x, columns=x_names)
data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
return data
def generate_normal_xy_correlated(N, D, K, n_total, noise_std_true=1,
covariate_mean=None, covariate_var=None, sigma=None):
"""
Scenario 3: Correlated cell types and covariates
Parameters
----------
N -- int
Number of samples
D -- int
Number of covariates
K -- int
Number of cell types
n_total -- list
Number of individual cells per sample
noise_std_true -- float
noise level. 0: No noise
covariate_mean -- numpy array [D]
Mean of each covariate
covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
sigma -- numpy array [KxK]
correlation matrix for cell types
Returns
-------
data
Anndata object
"""
if covariate_mean is None:
covariate_mean = np.zeros(shape=D)
if sigma is None:
sigma = np.identity(K)
# Generate randomized covariate covariance matrix if none is specified
if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov), Cov_ij = p^|i-j|, p=0.4
        # Correlated covariate structure as in Tibshirani (1996)
p = 0.4
covariate_var = np.zeros((D, D))
for i in range(D):
for j in range(D):
covariate_var[i, j] = p**np.abs(i-j)
# Generate random composition parameters
b_true = np.random.normal(0, 1, size=K).astype(np.float32) # bias (alpha)
w_true = np.random.normal(0, 1, size=(D, K)).astype(np.float32) # weights (beta)
# Generate random covariate matrix
x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
# Generate y
y = np.zeros([N, K], dtype=np.float32)
for i in range(N):
# Each row of y is now influenced by sigma
alpha = np.random.multivariate_normal(mean=x[i, :].T@w_true + b_true, cov=sigma*noise[i, :]).astype(np.float32)
concentration = softmax(alpha).astype(np.float32)
y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
x_names = ["x_" + str(n) for n in range(x.shape[1])]
x_df = pd.DataFrame(x, columns=x_names)
data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
return data
def sparse_effect_matrix(D, K, n_d, n_k):
"""
Generates a sparse effect matrix
Parameters
----------
D -- int
Number of covariates
K -- int
Number of cell types
n_d -- int
Number of covariates that effect a cell type
n_k -- int
Number of cell types that are affected by any covariate
Returns
-------
w_true
Effect matrix
"""
# Choose indices of affected cell types and covariates randomly
d_eff = np.random.choice(range(D), size=n_d, replace=False)
k_eff = np.random.choice(range(K), size=n_k, replace=False)
# Possible entries of w_true
w_choice = [0.3, 0.5, 1]
w_true = np.zeros((D, K))
# Fill in w_true
for i in d_eff:
for j in k_eff:
c = np.random.choice(3, 1)
w_true[i, j] = w_choice[c]
return w_true
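# Example (sketch): sparse_effect_matrix(D=4, K=5, n_d=2, n_k=3) returns a 4x5 matrix in which
# only the 2x3 block of randomly chosen covariate/cell-type pairs is filled with values drawn
# from {0.3, 0.5, 1}; all other entries stay 0.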
def generate_sparse_xy_correlated(N, D, K, n_total, noise_std_true=1,
covariate_mean=None, covariate_var=None,
sigma=None,
b_true=None, w_true=None):
"""
Scenario 4: Sparse true parameters
Parameters
----------
N -- int
Number of samples
D -- int
Number of covariates
K -- int
Number of cell types
n_total -- list
Number of individual cells per sample
noise_std_true -- float
noise level. 0: No noise
covariate_mean -- numpy array [D]
Mean of each covariate
covariate_var -- numpy array [DxD]
        Covariance matrix for all covariates
sigma -- numpy array [KxK]
correlation matrix for cell types
b_true -- numpy array [K]
bias coefficients
w_true -- numpy array [DxK]
Effect matrix
Returns
-------
data
Anndata object
"""
if covariate_mean is None:
covariate_mean = np.zeros(shape=D)
if sigma is None:
sigma = np.identity(K)
# Generate randomized covariate covariance matrix if none is specified
if covariate_var is None:
        # Covariates drawn from MvNormal(0, Cov), Cov_ij = p^|i-j|, p=0.4
        # Correlated covariate structure as in Tibshirani (1996)
p = 0.4
covariate_var = np.zeros((D, D))
for i in range(D):
for j in range(D):
covariate_var[i, j] = p ** np.abs(i - j)
    # Uniform intercepts if none are specified
if b_true is None:
b_true = np.random.uniform(-3,3, size=K).astype(np.float32) # bias (alpha)
# Randomly select covariates that should correlate if none are specified
if w_true is None:
n_d = np.random.choice(range(D), size=1)
n_k = np.random.choice(range(K), size=1)
w_true = sparse_effect_matrix(D, K, n_d, n_k)
# Generate random covariate matrix
x = np.random.multivariate_normal(size=N, mean=covariate_mean, cov=covariate_var).astype(np.float32)
noise = noise_std_true * np.random.randn(N, 1).astype(np.float32)
# Generate y
y = np.zeros([N, K], dtype=np.float32)
for i in range(N):
# Each row of y is now influenced by sigma
alpha = np.random.multivariate_normal(mean=x[i, :].T @ w_true + b_true, cov=sigma * noise[i, :]).astype(
np.float32)
concentration = softmax(alpha).astype(np.float32)
y[i, :] = np.random.multinomial(n_total[i], concentration).astype(np.float32)
x_names = ["x_" + str(n) for n in range(x.shape[1])]
x_df = pd.DataFrame(x, columns=x_names)
data = ad.AnnData(X=y, obs=x_df, uns={"b_true": b_true, "w_true": w_true})
return data
def generate_case_control(cases=1, K=5, n_total=1000, n_samples=[5,5], noise_std_true=0,
sigma=None, b_true=None, w_true=None):
"""
Generates compositional data with binary covariates
Parameters
----------
cases -- int
number of covariates
K -- int
Number of cell types
n_total -- int
number of cells per sample
n_samples -- list
Number of samples per case combination as array[2**cases]
noise_std_true -- float
noise level. 0: No noise - Not in use atm!!!
sigma -- numpy array [KxK]
correlation matrix for cell types
b_true -- numpy array [K]
bias coefficients
w_true -- numpy array [DxK]
Effect matrix
Returns
-------
Anndata object
"""
D = cases**2
    # Uniform intercepts if none are specified
if b_true is None:
b_true = np.random.uniform(-3, 3, size=K).astype(np.float32) # bias (alpha)
# Randomly select covariates that should correlate if none are specified
if w_true is None:
n_d = np.random.choice(range(D), size=1)
        n_k = np.random.choice(range(K), size=1)
<filename>ztfquery/sedm.py
#! /usr/bin/env python
#
""" Access SEDM data from pharos """
PHAROS_BASEURL = "http://pharos.caltech.edu"
import os
import requests
import json
import numpy as np
import pandas
import warnings
from . import io
SEDMLOCAL_BASESOURCE = io.LOCALSOURCE+"SEDM"
SEDMLOCALSOURCE = SEDMLOCAL_BASESOURCE+"/redux"
if not os.path.exists(SEDMLOCAL_BASESOURCE):
os.makedirs(SEDMLOCAL_BASESOURCE)
if not os.path.exists(SEDMLOCALSOURCE):
os.makedirs(SEDMLOCALSOURCE)
#######################
# #
# High level method #
# #
#######################
def _download_sedm_data_(night, pharosfile, fileout=None, verbose=False):
""" """
url = PHAROS_BASEURL+"/data/%s/"%night+pharosfile
if verbose:
print(url)
return io.download_single_url(url,fileout=fileout,
auth=io._load_id_("pharos"),
cookies="no_cookies")
def _relative_to_source_(relative_datapath, source=None):
""" """
if source is None:
return relative_datapath
if source in ["pharos"]:
return [PHAROS_BASEURL+"/data/"+l for l in relative_datapath]
if source in ["local"]:
return [SEDMLOCALSOURCE+"/"+l for l in relative_datapath]
def get_night_file(night):
""" get the what.list for a given night
night format: YYYYMMDD
"""
response = _download_sedm_data_(night, "what.list")
return response.text.splitlines()
def get_pharos_night_data(date, auth=None):
""" """
username,password = io._load_id_("pharos") if auth is None else auth
requests_prop = {"data":json.dumps({"obsdate":date,
"username":username,
"password":password,
}),
"headers":{'content-type': 'application/json'}}
t = requests.post(PHAROS_BASEURL+"/get_user_observations", **requests_prop).text
if "data" not in t:
        raise IOError("night file download failed. Check your authentication?")
return np.sort(json.loads(t)["data"])
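# Usage sketch (assumes pharos credentials are stored and readable via io._load_id_("pharos")):
#     what_lines = get_night_file("20180913")        # lines of that night's what.list
#     files = get_pharos_night_data("20180913")      # sorted list of pharos data paths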
#######################
# #
# INTERNAL JSON DB #
# #
#######################
# 20181012 20181105
EMPTY_WHAT_DF = pandas.DataFrame(columns=["filename","airmass", "shutter", "exptime", "target", "night"])
def _parse_line_(line):
""" """
try:
filename, rest = line.split('(')
info, what = rest.split(")")
what = what.replace(":", "")
return [filename.replace(" ","")]+info.split("/")+[what.replace(" [A]","").strip()]
except:
return None
def whatfiles_to_dataframe(whatfile):
""" """
parsed_lines = [_parse_line_(l_) for l_ in whatfile]
return pandas.DataFrame([l for l in parsed_lines if l is not None],
columns=["filename","airmass", "shutter", "exptime", "target"])
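# Sketch of the what.list line format assumed by _parse_line_ (illustrative, not an official
# spec): "<filename> (<airmass>/<shutter>/<exptime>) <target>:", e.g. a line such as
# "ifu20180913_06_28_51.fits (1.15/open/2250.0) ZTF18abcdef:" would be split into the
# filename, the three slash-separated fields and the cleaned target name.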
class _SEDMFiles_():
""" """
SOURCEFILE = SEDMLOCAL_BASESOURCE+"/whatfiles.json"
PHAROSFILES = SEDMLOCAL_BASESOURCE+"/pharosfiles.json"
def __init__(self):
""" """
self.load()
def get_night_data(self, night, from_dict=False):
""" """
if from_dict:
df_night = pandas.DataFrame(whatfiles_to_dataframe(self._data[night]))
df_night["night"] = night
return df_night
return self.data[self.data["night"].isin(np.atleast_1d(night))]
def get_pharos_night_data(self, night):
""" """
return self._pharoslist[night]
def get_data_betweenrange(self, start="2018-08-01", end=None):
""" """
        lower_bound = True if start is None else (self.datetime > start)
        upper_bound = True if end is None or end in ["now", "today"] else (self.datetime < end)
return self.data[lower_bound & upper_bound]
def get_target_data(self, target, timerange=None):
""" """
data_ = self.data if timerange is None else self.get_data_betweenrange(*timerange)
return data_[data_["target"].isin(np.atleast_1d(target))]
def get_observed_targets(self, timerange=None):
""" """
data_ = self.data if timerange is None else self.get_data_betweenrange(*timerange)
return np.unique(data_["target"])
def get_nights_with_target(self, target, timerange=None):
""" """
return np.unique( self.get_target_data(target, timerange=timerange)["night"] )
# -------- #
# I/O #
# -------- #
def download_nightrange(self, start="2018-08-01", end="now", update=False, pharosfiles=False, dump=True):
""" """
if end is None or end in ["today", "now"]:
from datetime import datetime
today = datetime.today()
end = today.isoformat().split("T")[0]
self.add_night(["%4d%02d%02d"%(tt.year,tt.month, tt.day) for tt in pandas.date_range(start=start, end=end) ], update=update)
if pharosfiles:
self.add_pharoslist(["%4d%02d%02d"%(tt.year,tt.month, tt.day) for tt in pandas.date_range(start=start, end=end) ], update=update)
if dump:
self.dump("whatfile" if not pharosfiles else "both")
def add_night(self, night, update=False):
""" night (or list of) with the given format YYYYMMDD
if the given night is already known, this will the download except if update is True
"""
for night_ in np.atleast_1d(night):
if night_ in self._data and not update:
continue
self._data[night_] = get_night_file(night_)
self.dump("whatfile")
self._build_dataframe_()
def load(self):
""" """
# What Files
if os.path.isfile( self.SOURCEFILE ):
self._data = json.load( open(self.SOURCEFILE, 'r') )
else:
self._data = {}
# What Pharos Data
if os.path.isfile( self.PHAROSFILES ):
self._pharoslist = json.load( open(self.PHAROSFILES, 'r') )
else:
self._pharoslist = {}
self._build_dataframe_()
def dump(self, which="both"):
""" Save the current version of whatfiles and or pharos files on your computer.
Parameters
----------
which: [str] -optional-
what kind of data do you want to dump ?
- whatfile
- pharosfile
- both
"""
if not which in ["whatfile","pharosfile","both","*", "all"]:
raise ValueError("which can only be whatfile or pharosfile or both")
if which in ["whatfile", "both","*", "all"]:
with open(self.SOURCEFILE, 'w') as outfile:
json.dump(self._data, outfile)
if which in ["pharosfile","both","*", "all"]:
with open(self.PHAROSFILES, 'w') as outfile:
json.dump(self._pharoslist, outfile)
def _build_dataframe_(self):
""" """
if len(self._data.keys())>0:
self.data = pandas.concat(self.get_night_data(night, from_dict=True) for night in self._data.keys())
else:
self.data = EMPTY_WHAT_DF
# ---------------- #
# Pharos Data #
# ---------------- #
def add_pharoslist(self, night, update=False):
""" """
for night_ in np.atleast_1d(night):
if night_ in self._pharoslist and not update:
continue
try:
self._pharoslist[night_] = [l.replace("/data/","") for l in get_pharos_night_data(night_)]
except:
warnings.warn("Pharos List download: Failed for %s"%night_)
self.dump("pharosfile")
# ================ #
# Properties #
# ================ #
@property
def datetime(self):
""" pandas.to_datetime(p.sedmwhatfiles.data["night"]) """
return pandas.to_datetime(self.data["night"])
##################
# #
# PHAROS #
# #
##################
class SEDMQuery( object ):
""" """
PROPERTIES = ["auth", "date"]
def __init__(self, auth=None, date=None):
""" """
self.sedmwhatfiles = _SEDMFiles_()
self.reset()
self.set_date(date)
self.set_auth(io._load_id_("pharos") if auth is None else auth)
def reset(self):
""" set the authentification, date and any other properties to default """
self._properties = {k:None for k in self.PROPERTIES}
# -------- #
# SETTER #
# -------- #
def set_date(self, date):
""" attach a date for faster night access interation """
self._properties["date"] = date
def set_auth(self, auth):
""" provide your authentification. """
self._properties["auth"] = auth
# ----------- #
# Downloader #
# ----------- #
def download_night_fluxcal(self, night, nodl=False, auth=None, download_dir="default",
show_progress=False, notebook=False, verbose=True,
overwrite=False, nprocess=None):
""" download SEDM fluxcalibration file for the given night
Parameters
----------
nodl: [bool] -optional-
do not launch the download, instead, returns
list of queried url and where they are going to be stored.
download_dir: [string] -optional-
Directory where the file should be downloaded.
If th
overwrite: [bool] -optional-
Check if the requested data already exist in the target download directory.
If so, this will skip the download except if overwrite is set to True.
nprocess: [None/int] -optional-
Number of parallel downloading you want to do.
If None, it will be set to 1 and will not use multiprocess
auth: [str, str] -optional-
            [username, password] of your IRSA account.
If used, information stored in ~/.ztfquery will be ignored.
Returns
-------
Void or list (see nodl)
"""
relative_path = [l for l in self.get_night_data(night, source='pharos') if l.split("/")[-1].startswith("fluxcal")]
return self._download_from_relative_path_(relative_path, nodl=nodl, auth=auth, download_dir=download_dir,
show_progress=show_progress, notebook=notebook, verbose=verbose,
overwrite=overwrite, nprocess=nprocess)
def download_target_data(self, target, which="cube", extension="fits",
timerange=["2018-08-01", None],
nodl=False, auth=None, download_dir="default",
show_progress=False, notebook=False, verbose=True,
overwrite=False, nprocess=None ):
"""
download SEDM data associated to the given target.
Parameters
----------
target: [string]
Name of a source (e.g. ZTF18abuhzfc) of any part of a filename (i.e. 20180913_06_28_51)
which: [string] -optional-
kind oif data you want.
- cube / spec / ccd / all
extension: [string] -optional-
Extension of the file
- these exist depending on the file you want: fits / png / pdf / pkl / all
timerange: [iso format dates] -optional-
time range between which you are looking for file.
If the dates are not yet stored in you whatfiles.json, this will first download it.
if the second data is None, it means 'today'
nodl: [bool] -optional-
do not launch the download, instead, returns
list of queried url and where they are going to be stored.
download_dir: [string] -optional-
Directory where the file should be downloaded.
If th
overwrite: [bool] -optional-
Check if the requested data already exist in the target download directory.
If so, this will skip the download except if overwrite is set to True.
nprocess: [None/int] -optional-
Number of parallel downloading you want to do.
If None, it will be set to 1 and will not use multiprocess
auth: [str, str] -optional-
            [username, password] of your IRSA account.
If used, information stored in ~/.ztfquery will be ignored.
Returns
-------
Void or list (see nodl)
"""
# Build the path (local and url)
if "astrom" in which or "guider" in which:
print("TMP which=astrom fixe")
relative_path = [l.replace("e3d","guider").replace(target,"astrom") for l in self.get_data_path(target, which="cube",extension="fits", timerange=timerange, source="pharos")]
else:
relative_path = self.get_data_path(target, which=which,extension=extension, timerange=timerange, source="pharos")
return self._download_from_relative_path_(relative_path, nodl=nodl, auth=auth, download_dir=download_dir,
show_progress=show_progress, notebook=notebook, verbose=verbose,
overwrite=overwrite, nprocess=nprocess)
# - Internal method
def _download_from_relative_path_(self, relative_path,
nodl=False, auth=None, download_dir="default",
show_progress=False, notebook=False, verbose=True,
overwrite=False, nprocess=None):
""" Given a relative path, this builds the data to download and where to.
Parameters
----------
nodl: [bool] -optional-
do not launch the download, instead, returns
list of queried url and where they are going to be stored.
download_dir: [string] -optional-
            Directory where the file should be downloaded.
<filename>server.py<gh_stars>1-10
# coding: utf-8
import argparse
import json
# std
from datetime import datetime
# web
from flask import Flask, render_template, request
from flask import jsonify
from flask_cors import CORS, cross_origin
from flask_frozen import Freezer
from flask import Response
from flask_htpasswd import HtPasswdAuth
from kneed import KneeLocator
# mabed
from mabed.functions import Functions
import datetime
app = Flask(__name__, static_folder='browser/static', template_folder='browser/templates')
app.config['FLASK_HTPASSWD_PATH'] = '.htpasswd'
app.config['FLASK_SECRET'] = 'Hey Hey Kids, secure me!'
htpasswd = HtPasswdAuth(app)
# ==================================================================
# 1. Tests and Debug
# ==================================================================
# Enable CORS
# cors = CORS(app)
# app.config['CORS_HEADERS'] = 'Content-Type'
# Disable Cache
@app.after_request
def add_header(r):
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
# Settings Form submit
@app.route('/settings', methods=['POST'])
# @cross_origin()
def settings():
data = request.form
return jsonify(data)
@app.route('/event_descriptions')
def event_descriptions():
event_descriptions = functions.event_descriptions("test3")
events = []
for event in event_descriptions:
        # 'import datetime' at the top of this file rebinds the name to the module object,
        # so the module-qualified call is used here; event[2] is assumed to hold the end date
        # (the original line parsed event[1] twice).
        start_date = datetime.datetime.strptime(event[1], "%Y-%m-%d %H:%M:%S")
        end_date = datetime.datetime.strptime(event[2], "%Y-%m-%d %H:%M:%S")
obj = {
"media": {
"url": "static/images/img.jpg"
},
"start_date": {
"month": start_date.month,
"day": start_date.day,
"year": start_date.year
},
"end_date": {
"month": end_date.month,
"day": end_date.day,
"year": end_date.year
},
"text": {
"headline": event[3],
"text": "<p>" + event[4] + "</p>"
}
}
events.append(obj)
res = {
"events": events
}
return jsonify(res)
# ==================================================================
# 2. MABED
# ==================================================================
# Run MABED
@app.route('/detect_events', methods=['POST', 'GET'])
# @cross_origin()
def detect_events():
data = request.form
index = data['index']
k = int(data['top_events'])
maf = float(data['min_absolute_frequency'])
mrf = float(data['max_relative_frequency'])
tsl = int(data['time_slice_length'])
p = float(data['p_value'])
theta = float(data['t_value'])
sigma = float(data['s_value'])
session = data['session']
filter = data['filter']
cluster = int(data['cluster'])
events=""
res = False
if filter=="all":
events = functions.event_descriptions(index, k, maf, mrf, tsl, p, theta, sigma, cluster)
elif filter == "proposedconfirmed":
filter = ["proposed","confirmed"]
events = functions.filtered_event_descriptions(index, k, maf, mrf, tsl, p, theta, sigma, session, filter, cluster)
else:
events = functions.filtered_event_descriptions(index, k, maf, mrf, tsl, p, theta, sigma, session, [filter], cluster)
if not events:
events = "No Result!"
else:
res = True
return jsonify({"result": res, "events":events})
# ==================================================================
# 3. Images
# ==================================================================
@app.route('/images')
def images():
with open('twitter2015.json') as f:
data = json.load(f)
clusters_num = len(data['duplicates'])
clusters = data['duplicates']
return render_template('images.html',
clusters_num=clusters_num,
clusters=clusters
)
# ==================================================================
# 4. Tweets
# ==================================================================
# Get Tweets
@app.route('/tweets', methods=['POST'])
# @cross_origin()
def tweets():
data = request.form
tweets= functions.get_tweets(index=data['index'], word=data['word'])
clusters= functions.get_clusters(index=data['index'], word=data['word'])
return jsonify({"tweets": tweets, "clusters": clusters})
# Get Tweets
@app.route('/tweets_filter', methods=['POST'])
# @cross_origin()
def tweets_filter():
data = request.form
tweets= functions.get_tweets_query_state(index=data['index'], word=data['word'], state=data['state'], session=data['session'])
clusters= functions.get_clusters(index=data['index'], word=data['word'])
return jsonify({"tweets": tweets, "clusters": clusters})
@app.route('/tweets_scroll', methods=['POST'])
# @cross_origin()
def tweets_scroll():
data = request.form
tweets= functions.get_tweets_scroll(index=data['index'], sid=data['sid'], scroll_size=int(data['scroll_size']))
return jsonify({"tweets": tweets})
# Get Event related tweets
@app.route('/event_tweets', methods=['POST'])
# @cross_origin()
def event_tweets():
data = request.form
index = data['index']
event = json.loads(data['obj'])
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
tweets = functions.get_event_tweets(index, main_term, related_terms)
clusters = functions.get_event_clusters(index, main_term, related_terms)
return jsonify({"tweets": tweets, "clusters": clusters})
# Get Event related tweets
@app.route('/event_filter_tweets', methods=['POST'])
def event_filter_tweets():
data = request.form
index = data['index']
state = data['state']
session = data['session']
event = json.loads(data['obj'])
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
tweets = functions.get_event_filter_tweets(index, main_term, related_terms, state, session)
clusters = functions.get_event_clusters(index, main_term, related_terms)
return jsonify({"tweets": tweets, "clusters": clusters})
@app.route('/tweets_state', methods=['POST'])
# @cross_origin()
def tweets_state():
data = request.form
tweets= functions.get_tweets_state(index=data['index'], session=data['session'], state=data['state'])
return jsonify({"tweets": tweets})
# Get Image Cluster tweets
@app.route('/cluster_tweets', methods=['POST', 'GET'])
# @cross_origin()
def cluster_tweets():
data = request.form
index = data['index']
cid = data['cid']
event = json.loads(data['obj'])
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
tres = functions.get_event_tweets2(index, main_term, related_terms, cid)
event_tweets = tres
res = functions.get_cluster_tweets(index, cid)
tweets = res['hits']['hits']
tweets = {"results":tweets}
return jsonify({"tweets": tweets, "event_tweets": event_tweets})
# Get Search Image Cluster tweets
@app.route('/cluster_search_tweets', methods=['POST', 'GET'])
# @cross_origin()
def cluster_search_tweets():
data = request.form
index = data['index']
cid = data['cid']
word = data['word']
search_tweets = functions.get_big_tweets(index=index, word=word)
res = functions.get_cluster_tweets(index, cid)
tweets = res['hits']['hits']
tweets = {"results": tweets}
return jsonify({"tweets": tweets, "search_tweets": search_tweets})
# Get Event main image
@app.route('/event_image', methods=['POST'])
# @cross_origin()
def event_image():
data = request.form
index = data['index']
event = json.loads(data['obj'])
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
image = functions.get_event_image(index, main_term, related_terms)
res = False
if image:
image = image['hits']['hits'][0]['_source']
res = True
return jsonify({"result":res, "image": image})
# Test & Debug
@app.route('/mark_valid', methods=['POST', 'GET'])
# @cross_origin()
def mark_valid():
data = request.form
res = functions.set_all_status("twitter2015", "session_Twitter2015", "proposed")
return jsonify(res)
@app.route('/mark_event', methods=['POST', 'GET'])
# @cross_origin()
def mark_event():
data = request.form
index = data['index']
session = data['session']
functions.set_status(index, session, data)
return jsonify(data)
@app.route('/mark_cluster', methods=['POST', 'GET'])
# @cross_origin()
def mark_cluster():
data = request.form
index = data['index']
session = data['session']
cid = data['cid']
state = data['state']
res = functions.set_cluster_state(index, session, cid, state)
return jsonify(res)
@app.route('/mark_tweet', methods=['POST', 'GET'])
# @cross_origin()
def mark_tweet():
data = request.form
index = data['index']
session = data['session']
tid = data['tid']
val = data['val']
functions.set_tweet_state(index, session, tid, val)
return jsonify(data)
@app.route('/mark_search_tweets', methods=['POST', 'GET'])
# @cross_origin()
def mark_search_tweets():
data = request.form
index = data['index']
session = data['session']
word= data['word']
state = data['state']
functions.set_search_status(index, session, state, word)
return jsonify(data)
@app.route('/mark_search_tweets_force', methods=['POST', 'GET'])
def mark_search_tweets_force():
data = request.form
index = data['index']
session = data['session']
word= data['word']
state = data['state']
functions.set_search_status_force(index, session, state, word)
return jsonify(data)
@app.route('/delete_field', methods=['POST', 'GET'])
# @cross_origin()
def delete_field():
up1 = functions.update_all("twitter2017", "tweet", "imagesCluster", "")
return jsonify(up1)
# ==================================================================
# 5. Export
# ==================================================================
@app.route('/export_events', methods=['POST', 'GET'])
# @cross_origin()
def export_events():
# data = request.form
# session = data['session_id']
# res = functions.get_session(session)
res = functions.get_session('6n7aD2QBU2R9ngE9d8IB')
index = res['_source']['s_index']
events = json.loads(res['_source']['events'])
for event in events:
main_term = event['main_term'].replace(",", " ")
# event['main_term']=main_term
related_terms = event['related_terms']
# tweets = functions.get_event_tweets(index, main_term, related_terms)
# tweets = tweets['hits']['hits']
event['tweets'] = 'tweets'
return jsonify(events)
# return Response(str(events),
# mimetype='application/json',
# headers={'Content-Disposition': 'attachment;filename=events.json'})
@app.route('/export_tweets', methods=['POST', 'GET'])
# @cross_origin()
def export_tweets():
session = request.args.get('session')
# data = request.form
# session = data['session_id']
# res = functions.get_session(session)
res = functions.get_session(session)
index = res['_source']['s_index']
events = json.loads(res['_source']['events'])
for event in events:
main_term = event['main_term'].replace(",", " ")
# event['main_term']=main_term
related_terms = event['related_terms']
# tweets = functions.get_event_tweets(index, main_term, related_terms)
# tweets = tweets['hits']['hits']
event['tweets'] = 'tweets'
return jsonify(session)
# return Response(str(events),
# mimetype='application/json',
# headers={'Content-Disposition': 'attachment;filename=events.json'})
@app.route('/export_confirmed_tweets', methods=['POST', 'GET'])
# @cross_origin()
def export_confirmed_tweets():
session = request.args.get('session')
res = functions.get_session(session)
index = res['_source']['s_index']
s_name = res['_source']['s_name']
tweets = functions.export_event(index,s_name)
return Response(str(tweets),
mimetype='application/json',
headers={'Content-Disposition':'attachment;filename='+s_name+'tweets.json'})
# ==================================================================
# 6. Beta
# ==================================================================
@app.route('/event_tweets_count', methods=['POST', 'GET'])
def event_tweets_count():
data = request.form
index = data['index']
event = json.loads(data['obj'])
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
count = functions.get_event_tweets_count(index, main_term, related_terms)
all_count = functions.get_all_count(index)
percentage = 100*(count/all_count)
res = {'count':count, 'all': all_count, 'percentage':percentage}
return jsonify(res)
@app.route('/get_all_count', methods=['POST', 'GET'])
def get_all_count():
data = request.form
index = data['index']
count = functions.get_all_count(index)
res = {'count':count}
return jsonify(res)
@app.route('/get_words_count', methods=['POST', 'GET'])
def get_words_count():
data = request.form
index = data['index']
words = data['words']
count = functions.get_words_count(index, words)
res = {'count':count}
return jsonify(res)
@app.route('/get_keywords', methods=['POST', 'GET'])
def get_keywords():
data = request.form
index = data['index']
words = data['words']
sd = data['sd']
ed = data['ed']
count = data['count']
# event = json.loads(data['obj'])
# main_term = event['main_term'].replace(",", " ")
# related_terms = event['related_terms']
start_time = int(sd) / 1000
start_time = datetime.datetime.fromtimestamp(start_time)
end_time = int(ed) / 1000
end_time = datetime.datetime.fromtimestamp(end_time)
start_ms = start_time.timestamp() * 1000
end_ms = end_time.timestamp() * 1000
# count = functions.get_range_count(index, start_ms, end_ms)
newKeywords = functions.process_range_tweets(index, start_ms, end_ms, words, 100)
res = {"words":words, "count":count, "newKeywords":newKeywords}
# res = {"words":words, "count":count}
return jsonify(res)
@app.route('/get_word2vec', methods=['POST', 'GET'])
def get_word2vec():
# data = request.form
index = 'twitter2017'
words = "fêtes"
count = 10
# count = functions.get_range_count(index, start_ms, end_ms)
newKeywords = functions.process_w2v_tweets(index, words, 10)
res = {"words":words, "count":count, "newKeywords":newKeywords}
# res = {"words":words, "count":count}
print(res)
return jsonify(res)
@app.route('/get_sse', methods=['POST', 'GET'])
def get_sse():
data = request.form
index = data['index']
words = data['words']
event = json.loads(data['obj'])
keywords = json.loads(data['keywords'])
newKeywords = keywords['words']
main_term = event['main_term'].replace(",", " ")
related_terms = event['related_terms']
sd = data['sd']
ed = data['ed']
start_time = int(sd) / 1000
start_time = datetime.datetime.fromtimestamp(start_time)
end_time = int(ed) / 1000
end_time = datetime.datetime.fromtimestamp(end_time)
start_ms = start_time.timestamp() * 1000
end_ms = end_time.timestamp() * 1000
sse = {}
sse_points = []
# mean = functions.getMean(index, main_term, related_terms)
# sse0 = functions.getSSE(index, main_term, related_terms, mean)
# sse[0]=sse0
related_string = ""
least_value = 100.0
for t in related_terms:
related_string = related_string + " "+ t['word']
if float(t['value'])<least_value:
least_value=float(t['value'])
words = main_term +" "+ related_string
# newKeywords = functions.process_range_tweets(index, start_ms, end_ms, words, 20)
# newKeywords = [('couleurs', 0.9541982412338257), ('cette…', 0.9535157084465027), ('consultation', 0.9513106346130371), ('tgvmax', 0.9512830972671509), ('lyonmag', 0.9508819580078125), ('vous…', 0.9507380127906799), ('sublime', 0.9503788948059082), ('le_progres', 0.9499937891960144), ('vue', 0.9492042660713196), ('oliviermontels', 0.9490641355514526), ('sport2job', 0.9481754899024963), ('lyonnai…', 0.9481167197227478), ('hauteurs', 0.9463335275650024), ('illuminations', 0.9462761282920837), ('familial', 0.9458074569702148), ('fdl2017…', 0.945579469203949), ('leprogreslyon', 0.9455731511116028), ('weekend', 0.9454441070556641), ('pensant', 0.9449157118797302), ('radioscoopinfos', 0.9441419839859009)]
print("---------------")
print("newKeywords")
print(newKeywords)
print("related_terms")
print(related_terms)
print("---------------")
sse2 = []
for i in range(0, 40):
temp_terms = []
temp_terms = temp_terms +
# -*- coding: utf-8 -*-
import os
import mab.gd.logging as logging
import emcee
from numpy import *
import numpy
import scipy
from mab.gd import gdfast_schw
from kaplot import *
logger = logging.getLogger("gd.schw.solution2")
class Dummy(object):
pass
dummy = Dummy()
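# lnprob is the log-posterior sampled by the emcee walkers in domcmc below:
# the walker coordinates u are unconstrained, and x = exp(u) / sum(exp(u))
# maps them onto the simplex of non-negative weights summing to one before
# the log-probabilities of all terms in dummy.opts are summed.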
def lnprob(u):
x = exp(u)
x /= sum(x)
logp = sum([k.logp(x) for k in dummy.opts])
#print logp
return logp
def domcmc(x, opts):
dummy.opts = opts
N = len(x)
x0 = x
ndim = N
nwalkers = 2*ndim
grad = zeros(N)
for opt in opts:
opt.dlogpdx(x, grad)
def gen():
x = array([x0[i]*(1+1e-8*(random.random()*2-1)) for i in range(ndim)])
x /= sum(x)
return x
p0 = [log(gen()) for i in xrange(nwalkers)]
dummy.opts = opts
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[], threads=50)
result = sampler.run_mcmc(p0, 1000)
print sampler.flatchain.shape
x = sampler.flatchain
print "x shape", x.shape
print "acceptance_fraction", sampler.acceptance_fraction
logprob = array(sampler.lnprobability.flat)
mozaic(2,2,box)
#m = x.mean(axis=0).reshape(20, 8)
#s = x.std(axis=0).reshape(20, 8)
m = x.mean(axis=0).reshape(8, 20)
s = x.std(axis=0).reshape(8, 20)
#mozaic(2,2,box)
select(0, 0)
indexedimage(x0.reshape(8,20))
select(0, 1)
indexedimage(m)
select(1, 0)
indexedimage(s)
select(1,1)
histogram(logprob, bincount=100)
l = lnprob(log(x0))
vline(l, color="red")
xlim(l-40, l+1)
draw()
import pdb
pdb.set_trace()
dsa
class Discrete(object):
def __init__(self, modelpath, light_model, aperture_light, profile_model, schwsetname, schwmodelname, storage_2d_m0, storage_2d_m2, storage_2d_m4, storage_3d, storage_2d_losvd, fitdensity2d, fitdensity3d, observation, binned_data_m2, binned_data_m4, dfgrid, max_iterations=1000, regularization=None, postfix=""):
self.modelpath = modelpath
self.light_model = light_model
self.profile_model = profile_model
self.schwsetname = schwsetname
self.schwmodelname = schwmodelname
self.storage_2d_m0 = storage_2d_m0
self.storage_2d_m2 = storage_2d_m2
self.storage_2d_m4 = storage_2d_m4
self.storage_3d = storage_3d
self.storage_2d_losvd = storage_2d_losvd
self.aperture_light = aperture_light
self.binned_data_m2 = binned_data_m2
self.binned_data_m4 = binned_data_m4
#self.storage_2d_binned = storage_2d_binned
self.fitdensity2d = fitdensity2d
self.fitdensity3d = fitdensity3d
self.observation = observation
self.dfgrid = dfgrid
self.max_iterations = max_iterations
self.regularization = regularization
#self.regularization_delta = regularization_delta
#self.use_jeans = use_jeans
#self.jeans_fraction = jeans_fraction
self.dirname = os.path.join(self.modelpath, "schw", self.schwsetname, self.schwmodelname)
self.logger = logging.getLogger("gd.schw.solution.likelihood")
self.postfix = postfix
def run(self, args, opts, scope):
self.init()
self.solve(scope)
def init(self):
self.observation.load()
self.storage_2d_m0.init()
self.storage_2d_m0.load()
self.storage_2d_m2.init()
self.storage_2d_m2.load()
self.storage_2d_m4.init()
self.storage_2d_m4.load()
self.binned_data_m2.load()
self.binned_data_m4.load()
#self.storage_2d_binned.init()
#self.storage_2d_binned.load()
self.storage_3d.init()
self.storage_3d.load()
self.storage_2d_losvd.load()
#self.storage_2d.aperture.load()
self.aperture_light.load()
def solve(self, scope):
stars = self.observation.stars
self.logger.info("using %d stars/observations" % len(stars))
#stars_inrange = stars.filter(lambda star: self.storage_2d.aperture.inrange(star.xi, star.eta))
stars_inrange = stars.filter(lambda star: self.storage_2d_losvd.aperture.inrange(star.xi, star.eta))
self.logger.info("stars in aperture range : %d" % len(stars_inrange))
self.logger.info("stars outside aperture range: %d" % (len(stars)-len(stars_inrange)))
vmax = self.storage_2d_losvd.vmax
#print "vmax", vmax
delta_v = 2*vmax/self.storage_2d_losvd.Nv
#print "res", delta_v
for star in stars:
star.aperture_index = self.storage_2d_losvd.aperture.findindex(star.xi, star.eta)
losvds = self.storage_2d_losvd.losvds
stars_invrange = stars.filter(lambda star: abs(star.vlos) < vmax)
self.logger.info("stars in velocity range : %d" % len(stars_invrange))
self.logger.info("stars outside velocity range: %d" % (len(stars)-len(stars_invrange)))
stars = stars_invrange
sigma_v = 2.01
numpy.random.seed(8)
for star in stars:
star.vlos = star.vlos_true# + numpy.random.normal(0, sigma_v)
#star.vlos = star.vlos_true + numpy.random.normal(0, sigma_v)
#print star.vlos, vmax, self.storage_2d_losvd.Nv
star.v_index = int(((star.vlos+vmax)/(2*vmax)) * self.storage_2d_losvd.Nv);
outlier = True
for losvd in losvds:
if losvd[star.v_index, star.aperture_index] != 0:
outlier = False
break
star.is_outlier = outlier
stars_no_outlier = stars.filter(lambda star: not star.is_outlier)
self.logger.info("non-outlier stars : %d" % len(stars_no_outlier))
self.logger.info("outlier stars : %d" % (len(stars)-len(stars_no_outlier)))
Rborders = arange(self.storage_2d_losvd.NR+1) / (0.0+self.storage_2d_losvd.NR) * (self.storage_2d_losvd.Rmax)
R1s = Rborders[0:-1]
R2s = Rborders[1:]
dRs = R2s - R1s
delta_R = R2s[0] - R1s[0]
assert all(abs(dRs - delta_R) < 1e-10), "dR is not constant"
#print Rborders
self.rho2d_target = array([self.light_model.cumdensityR(R1, R2, M=1.) for R1, R2 in zip(R1s, R2s)])
rho2ds = sum(losvds, axis=1)
rho2dmatrix = sum(losvds, axis=1)
rho3dmatrix = self.storage_3d.moments3d[:,0,:]
#rho2dmatrix = self.storage_2d_m0.moments[:,0,:]
r1s = self.storage_3d.rborders[:-1]
r2s = self.storage_3d.rborders[1:]
delta_r = r2s[0] - r1s[0]
#R1s = self.storage_2d.rborders[:-1]
#R2s = self.storage_2d.rborders[1:]
self.rho3d_target = array([self.light_model.cumdensityr(r1, r2, M=1.) for r1, r2 in zip(r1s, r2s)])
#for i in range(losvds.shape[0]):
#for j in range(losvds.shape[1]):
# print i, sum(losvds[i]),
for i in range(losvds.shape[0]):
#print sum(losvds[i])
for j in range(losvds.shape[2]):
#dens = sum(losvds[i,:,j])
#if dens > 0:
#print self.rho2d_target.shape
#print losvds.shape
#losvds[i,j,:] /= self.rho2d_target
#losvds[i,:,j] = scipy.ndimage.gaussian_filter(losvds[i,:,j], sigma_v/delta_v, mode='constant')
pass
losvds[i] = scipy.ndimage.gaussian_filter(losvds[i], [sigma_v/delta_v, 0.51])
losvds[i] /= (delta_v * delta_R)
#print
#print losvds.shape, delta_v, delta_R, Rborders[0], Rborders[-1]
#print Rborders
#for i in range(losvds.shape[0]):
#for j in range(losvds.shape[1]):
# print i, sum(losvds[i]*delta_v*delta_R),
v_indices = [star.v_index for star in stars]
aperture_indices = [star.aperture_index for star in stars]
#print losvds.shape
pmatrix = array(list((losvds/(self.rho2d_target/delta_R))[:,v_indices, aperture_indices]))
pmatrix = array(list((losvds)[:,v_indices, aperture_indices]))
pmatrix = pmatrix * 1.
#print pmatrix.shape
rho2d_error = self.rho2d_target.max() * 0.000001*0.5 * 0.1
error_x = 1e-3
if 1:
filename = os.path.join(self.modelpath, "df/orbitweights_tang.npy")
orbitweights = load(filename)
c = orbitweights.flatten()
c /= sum(c)
x = c
xtrue = x
if 0:
self.x0 = x
self.true_losvd = numpy.tensordot(self.storage_2d_losvd.losvds, c, axes=[(0,),(0,)])
self.true_rho2d = numpy.tensordot(self.storage_2d_losvd.masses, c, axes=[(0,),(0,)])
#self.true_rho2d = numpy.tensordot(rho2ds, c, axes=[(0,),(0,)])
self.true_rho3d = numpy.tensordot(rho3dmatrix, c, axes=[(0,),(0,)])
filename = os.path.join(self.modelpath, "df/losvd_tang.npy")
save(filename, self.true_losvd)
filename = os.path.join(self.modelpath, "df/masses_tang.npy")
save(filename, self.true_rho2d)
dsa
if 0:
#graph(self.true_rho3d)
#graph(self.rho3d_target, color="red")
#avg =
graph((self.true_rho3d-self.rho3d_target)/self.rho3d_target.max(), color="red")
draw()
#import pdb; pdb.set_trace()
else:
filename = os.path.join(self.modelpath, "df/losvd_tang.npy")
self.true_losvd = load(filename)
filename = os.path.join(self.modelpath, "df/masses_tang.npy")
self.true_rho2d = load(filename)
self.true_losvd = scipy.ndimage.gaussian_filter(self.true_losvd, [sigma_v/delta_v, 0.51])
self.true_losvd /= (delta_v * delta_R)
debug = False
if 0:
filename = os.path.join(self.dirname, "results/orbitweights" +self.postfix +".npy")
x = numpy.load(filename)
logger.info("loading orbitweights %s" % filename)
else:
x = x*0 + 1.
x = random.random(len(x))
x /= sum(x)
u = log(x)
#print rho2dmatrix.shape
rho3dmatrix = rho3dmatrix * 1
#rhoerror = maximum(self.rho3d_target*rho2d_error, self.rho3d_target.max() * 0.001)
rhoerror = self.rho3d_target*rho2d_error
s = self.rho3d_target/self.rho3d_target.max()
rhoerror = self.rho3d_target.max() * 0.05 * maximum(0.1, s) + self.rho3d_target * 0
rhoerror = self.rho3d_target.max() * 0.07 * maximum(0.1/7, s) + self.rho3d_target * 0
rhoerror = maximum(self.rho3d_target.max() * 0.01*1.5, self.rho3d_target * 0.01*2) + self.rho3d_target * 0
#rhoerror = maximum(rhoerror*1e-4, rho2d_error)
#self.opt = gdfast_schw.OptimizationProblemSchw(pmatrix, rho3dmatrix, x, self.rho3d_target, rhoerror, error_x, True, False, True)
fit_mass_3d = False
#fit_mass_3d = True
if fit_mass_3d:
mass_matrix = rho3dmatrix
mass_target = self.rho3d_target
mass_error = rhoerror
else:
rhoerror = maximum(self.rho2d_target.max() * 0.01*0.05, self.rho2d_target * 0.001)# + self.rho3d_target * 0
mass_matrix = rho2dmatrix
mass_target = self.rho2d_target
mass_error = rhoerror
entropy_scale = 1e-20
self.opt = gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, entropy_scale, True, True, True)
#print "true L?", self.opt.likelihood(log(xtrue))
self.opt_kin = gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, entropy_scale, True, False, False)
self.opts = [
gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, 0, True, False, False),
gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, 0, False, True, False),
gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, 0, False, False, True),
gdfast_schw.OptimizationProblemSchw(pmatrix, mass_matrix, x, mass_target, mass_error, error_x, entropy_scale, False, False, False),
]
debug = False
#debug = True
if 1:
#x = numpy.load("xlast.npy")
#print dir(self.light_model)
N = 250000
light_profile = self.light_model.light_profile
rs = light_profile.sample_r(N=N, rmax=100.)
costheta = numpy.random.random(N) * 2 - 1
phi = numpy.random.random(N) * 2 * pi
eta = numpy.random.random(N) * 2 * pi
theta = numpy.arccos(costheta)
#sintheta = numpy.sqrt(1-costheta**2)
sintheta = numpy.sin(theta)
#print r.shape, sintheta.shape, phi.shape, len(dt)
xp = x
x = rs * sintheta * numpy.cos(phi)
y = rs * sintheta * numpy.sin(phi)
Rs = sqrt(x**2+y**2)
x = xp
#ps = self.
Rs = Rs[Rs<1.5]
rs = rs[rs<1.5]
#rs = rs[rs>0.1]
#normal = scipy.integrate.quad(lambda R: light_profile.densityR(R,M=1.)*2*pi*R, 0, 1.5)[0]
normal = scipy.integrate.quad(lambda r: light_profile.densityr(r,M=1.)*4*pi*r**2, 0, 1.5)[0]
if debug:
print "normal", normal
#normal = 1.
if fit_mass_3d:
ps = [log(light_profile.densityr(r,M=1.)*4*pi*r**2/normal) for r in rs]
else:
ps = [log(light_profile.densityR(R,M=1.)*2*pi*R/normal) for R in Rs]
N = len(ps)
if debug:
print N
print "tot p", sum(ps)
print "mean p", mean(ps)
print rho3dmatrix.shape
if fit_mass_3d:
mass_indices = [int(r/1.5*100) for r in rs]
mass_matrix = rho3dmatrix[:,mass_indices] * 1. / delta_r
mass_matrixN = rho3dmatrix * 1./delta_r
totalmass_matrix = sum(rho3dmatrix, axis=1)
ptotalmass_matrix = sum(self.storage_2d_losvd.masses, axis=1)
counts, bins = numpy.histogram(rs, 100, [0, 1.5])
else:
mass_indices = [int(R/1.5*30) for R in Rs]
mass_matrix = self.storage_2d_losvd.masses[:,mass_indices] * 1. / delta_R
mass_matrixN = self.storage_2d_losvd.masses * 1. / delta_R
totalmass_matrix = ptotalmass_matrix = sum(self.storage_2d_losvd.masses, axis=1)
counts, bins = numpy.histogram(Rs, 30, [0, 1.5])
counts /= sum(counts)
counts = counts / 2000
if debug:
print "2d, delta_R", delta_R
#mass = dot(self.storage_2d_losvd.masses.T, xtrue)
if debug:
print "total 3d", sum(dot(rho3dmatrix.T, xtrue))
print "total 2d", sum(dot(self.storage_2d_losvd.masses.T, xtrue))
print "normal check", dot(xtrue, totalmass_matrix)
opt_matrix_mass = gdfast_schw.OptimizationMatrix(mass_matrix, totalmass_matrix)
counts = array(counts).astype(float64)# * 1.
#print counts, sum(counts)
rho3dmatrix = rho3dmatrix * 1.
#print "-->", rho3dmatrix.shape, counts.shape
#print rho3dmatrix.dtype, counts.dtype
counts = self.true_rho2d
counts /= sum(counts)
counts *= 200000
opt_matrix_massN = gdfast_schw.OptimizationMatrixN(mass_matrixN, counts, totalmass_matrix)
if debug:
print "logp", opt_matrix_mass.logp(xtrue), opt_matrix_mass.logp(x)
print "logp", opt_matrix_massN.logp(xtrue), opt_matrix_massN.logp(x)
print opt_matrix_mass.logp(xtrue)/N
print sum(self.rho3d_target)
#print (self.rho3d_target-mass)/self.rho3d_target
#print "x =", x, sum(x)
if 0:
box()
#mask = (rho3dmatrix/delta_r) > 1
#rho3dmatrix[mask] = 1000
I = rho3dmatrix * 1.
I /= I.max()
I = log10(I)
I[I<-6] = -6
indexedimage(I)
draw()
diff = log(dot(x, mass_matrix))-log(dot(xtrue, mass_matrix))
indices = argsort(diff)
#import pdb
#pdb.set_trace()
#sysdsa
if 1:
#x = xtrue
#print "sum(x)", sum(x)
opt_matrix_kin = gdfast_schw.OptimizationMatrix(pmatrix, ptotalmass_matrix)
counts_kin, binsx, biny = numpy.histogram2d([star.v_index for star in stars], [star.aperture_index for star in stars], bins=[30,30], range=[(0,30),(0, 30)])
if 0:
counts_kin2 = numpy.zeros((30, 30))
for star in stars:
counts_kin2[star.v_index, star.aperture_index] += 1
mozaic(2,2,box)
indexedimage(counts_kin)
select(0,1)
indexedimage(counts_kin2)
draw()
mask = counts_kin > 0
counts_kin = counts_kin[mask]
counts_kin = counts_kin * 1.
pmatrixN = losvds[:,mask]
#debug = True
if 1:
pmatrixN = losvds * 1.0
pmatrixN = pmatrixN.reshape((pmatrixN.shape[0], -1)) * 1.
counts_kin = self.true_losvd * 1.
counts_kin = counts_kin.reshape(-1) * 1.
counts_kin /= sum(counts_kin)
counts_kin *= 2000
#@import pdb
#pdb.set_trace()
if debug:
print "%d versus %d speedup: %f" % (pmatrixN.shape[1], len(stars), len(stars)*1./pmatrixN.shape[1])
#pmatrixN = array(list((losvds/(self.rho2d_target/delta_R))[:,v_indices, aperture_indices]))
#pmatrix = array(list((losvds)[:,v_indices, aperture_indices]))
pmatrixN = pmatrixN * 1.0
#print sum(counts_kin == 0)
opt_matrix_kinN = gdfast_schw.OptimizationMatrixN(pmatrixN, counts_kin, ptotalmass_matrix)
for k in [pmatrixN, counts_kin, ptotalmass_matrix]:
print k.min(), k.max(), k.sum(), k.std()
dsa
opt_norm = gdfast_schw.OptimizationNormalize(1.-.000001, 0.001)
opt_entropy = gdfast_schw.OptimizationEntropy(1.e-2)
opt = self.opt_kin
u = log(x)
if debug:
for i in [0, 1]:
x1 = x * 1.
x2 = x * 1.
dx = 1e-8
x2[i] += dx
grad = x * 0
for opt in [opt_matrix_kin, opt_matrix_kinN, opt_entropy]:#, opt_matrix_mass, opt_matrix_massN, opt_norm]:
grad = x * 0
print u.shape, grad.shape
opt.dlogpdx(x, grad)
#sys.exit(0)
w1 = opt.logp(x1)
w2 = opt.logp(x2)
print "w", opt.logp(x)
print "grad", grad[i]
print "grad", (w2-w1)/dx#/x[i]
print
print
#opt_matrix_kin.dlogpdx(x, grad)
#grad *= x
#print "grad3", grad[i]
#print "logp", opt.likelihood(u)
#print "logp", opt_matrix_kin.logp(x)
print
sys.exit(0)
#x = x * 0 + 1
#x /= sum(x)
global calls
calls = 0
debug = False
debug=True
opts = [opt_matrix_kinN, opt_matrix_massN, opt_norm]
def f_and_g(x):
global calls
#if calls > 10:
# numpy.save("xlast.npy", x)
# dsa
#calls += 1
if 1:
grad = x * 0
logp = opt_matrix_kinN.logp(x)*1 +\
opt_matrix_massN.logp(x)*0 +\
opt_norm.logp(x) * 1
#+\
#opt_entropy.logp(x) * 1.
opt_matrix_kinN.dlogpdx(x, grad)
#opt_matrix_massN.dlogpdx(x, grad)
opt_norm.dlogpdx(x, grad)
#opt_entropy.dlogpdx(x, grad)
if debug:
print "%10f %10f %10f %10f %10f" % (logp, sum(x), dot(totalmass_matrix, x/sum(x)), dot(ptotalmass_matrix, x/sum(x)), dot(losvds.T, x).sum() * delta_R * delta_v / dot(ptotalmass_matrix, x/sum(x)))
#print
if 0:
print ".", sum(x), dot(totalmass_matrix, x/sum(x))
print logp
for i in [0, 10, 100]:
x1 = x * 1.#/sum(x)
x2 = x * 1.#/sum(x)
dx = 1e-7
x2[i] += dx
#for opt in [opt_matrix_kin, opt_matrix_massN, opt_norm]:
for opt in [opt_matrix_massN]:
grad = x * 0
opt.dlogpdx(x, grad)
w1 = opt.logp(x1)
w2 = opt.logp(x2)
print "grad", grad[i]
print "grad man", (w2-w1)/dx#/x[i]
print
return -logp, -grad
u = log(x)
#print u
w = -self.opt.likelihood(u)
grad = u * 0
self.opt.dfdx(u, grad)
#print w
if 0:
print w
for i in [0, 1]:
u1 = u *
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
@staticmethod
def _defines_name_raises_or_returns(
name: str, handler: nodes.ExceptHandler
) -> bool:
"""Return True if some child of `handler` defines the name `name`,
raises, or returns.
"""
def _define_raise_or_return(stmt: nodes.NodeNG) -> bool:
if isinstance(stmt, (nodes.Raise, nodes.Return)):
return True
if isinstance(stmt, nodes.Assign):
for target in stmt.targets:
for elt in utils.get_all_elements(target):
if isinstance(elt, nodes.AssignName) and elt.name == name:
return True
if isinstance(stmt, nodes.If):
# Check for assignments inside the test
if (
isinstance(stmt.test, nodes.NamedExpr)
and stmt.test.target.name == name
):
return True
if isinstance(stmt.test, nodes.Call):
for arg_or_kwarg in stmt.test.args + [
kw.value for kw in stmt.test.keywords
]:
if (
isinstance(arg_or_kwarg, nodes.NamedExpr)
and arg_or_kwarg.target.name == name
):
return True
return False
for stmt in handler.get_children():
if _define_raise_or_return(stmt):
return True
if isinstance(stmt, (nodes.If, nodes.With)):
if any(
_define_raise_or_return(nested_stmt)
for nested_stmt in stmt.get_children()
):
return True
return False
@staticmethod
def _check_loop_finishes_via_except(
node: nodes.NodeNG, other_node_try_except: nodes.TryExcept
) -> bool:
"""Check for a case described in https://github.com/PyCQA/pylint/issues/5683.
It consists of a specific control flow scenario where the only
non-break exit from a loop consists of the very except handler we are
examining, such that code in the `else` branch of the loop can depend on it
being assigned.
Example:
for _ in range(3):
try:
do_something()
except:
name = 1 <-- only non-break exit from loop
else:
break
else:
print(name)
"""
if not other_node_try_except.orelse:
return False
closest_loop: Optional[
Union[nodes.For, nodes.While]
] = utils.get_node_first_ancestor_of_type(node, (nodes.For, nodes.While))
if closest_loop is None:
return False
if not any(
else_statement is node or else_statement.parent_of(node)
for else_statement in closest_loop.orelse
):
# `node` not guarded by `else`
return False
for inner_else_statement in other_node_try_except.orelse:
if isinstance(inner_else_statement, nodes.Break):
break_stmt = inner_else_statement
break
else:
# No break statement
return False
def _try_in_loop_body(
other_node_try_except: nodes.TryExcept, loop: Union[nodes.For, nodes.While]
) -> bool:
"""Return True if `other_node_try_except` is a descendant of `loop`."""
return any(
loop_body_statement is other_node_try_except
or loop_body_statement.parent_of(other_node_try_except)
for loop_body_statement in loop.body
)
if not _try_in_loop_body(other_node_try_except, closest_loop):
for ancestor in closest_loop.node_ancestors():
if isinstance(ancestor, (nodes.For, nodes.While)):
if _try_in_loop_body(other_node_try_except, ancestor):
break
else:
# `other_node_try_except` didn't have a shared ancestor loop
return False
for loop_stmt in closest_loop.body:
if NamesConsumer._recursive_search_for_continue_before_break(
loop_stmt, break_stmt
):
break
else:
# No continue found, so we arrived at our special case!
return True
return False
@staticmethod
def _recursive_search_for_continue_before_break(
stmt: nodes.Statement, break_stmt: nodes.Break
) -> bool:
"""Return True if any Continue node can be found in descendants of `stmt`
before encountering `break_stmt`, ignoring any nested loops.
"""
if stmt is break_stmt:
return False
if isinstance(stmt, nodes.Continue):
return True
for child in stmt.get_children():
if isinstance(stmt, (nodes.For, nodes.While)):
continue
if NamesConsumer._recursive_search_for_continue_before_break(
child, break_stmt
):
return True
return False
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_except_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
"""Return any nodes in ``found_nodes`` that should be treated as uncertain because they
are in a try block and the ``node_statement`` being evaluated is in one of its except handlers.
"""
uncertain_nodes: List[nodes.NodeNG] = []
closest_except_handler = utils.get_node_first_ancestor_of_type(
node_statement, nodes.ExceptHandler
)
if closest_except_handler is None:
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
# If the other statement is the except handler guarding `node`, it executes
if other_node_statement is closest_except_handler:
continue
# Ensure other_node is in a try block
(
other_node_try_ancestor,
other_node_try_ancestor_visited_child,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryExcept
)
if other_node_try_ancestor is None:
continue
if (
other_node_try_ancestor_visited_child
not in other_node_try_ancestor.body
):
continue
# Make sure nesting is correct -- there should be at least one
# except handler that is a sibling attached to the try ancestor,
# or is an ancestor of the try ancestor.
if not any(
closest_except_handler in other_node_try_ancestor.handlers
or other_node_try_ancestor_except_handler
in closest_except_handler.node_ancestors()
for other_node_try_ancestor_except_handler in other_node_try_ancestor.handlers
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
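# Illustration of the situation handled above (sketch, not taken from the
# pylint test-suite):
#
#     try:
#         name = compute()      # may raise before `name` is bound ...
#     except SomeError:
#         print(name)           # ... so this use is treated as uncertain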
@staticmethod
def _uncertain_nodes_in_try_blocks_when_evaluating_finally_blocks(
found_nodes: List[nodes.NodeNG], node_statement: nodes.Statement
) -> List[nodes.NodeNG]:
uncertain_nodes: List[nodes.NodeNG] = []
(
closest_try_finally_ancestor,
child_of_closest_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
node_statement, nodes.TryFinally
)
if closest_try_finally_ancestor is None:
return uncertain_nodes
if (
child_of_closest_try_finally_ancestor
not in closest_try_finally_ancestor.finalbody
):
return uncertain_nodes
for other_node in found_nodes:
other_node_statement = other_node.statement(future=True)
(
other_node_try_finally_ancestor,
child_of_other_node_try_finally_ancestor,
) = utils.get_node_first_ancestor_of_type_and_its_child(
other_node_statement, nodes.TryFinally
)
if other_node_try_finally_ancestor is None:
continue
# other_node needs to descend from the try of a try/finally.
if (
child_of_other_node_try_finally_ancestor
not in other_node_try_finally_ancestor.body
):
continue
# If the two try/finally ancestors are not the same, then
# node_statement's closest try/finally ancestor needs to be in
# the final body of other_node's try/finally ancestor, or
# descend from one of the statements in that final body.
if (
other_node_try_finally_ancestor is not closest_try_finally_ancestor
and not any(
other_node_final_statement is closest_try_finally_ancestor
or other_node_final_statement.parent_of(
closest_try_finally_ancestor
)
for other_node_final_statement in other_node_try_finally_ancestor.finalbody
)
):
continue
# Passed all tests for uncertain execution
uncertain_nodes.append(other_node)
return uncertain_nodes
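# Illustration of the situation handled above (sketch):
#
#     try:
#         name = compute()      # may raise before `name` is bound ...
#     finally:
#         print(name)           # ... so this use is treated as uncertain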
# pylint: disable=too-many-public-methods
class VariablesChecker(BaseChecker):
"""BaseChecker for variables.
Checks for
* unused variables / imports
* undefined variables
* redefinition of variable from builtins or from an outer scope
* use of variable before assignment
* __all__ consistency
* self/cls assignment
"""
__implements__ = IAstroidChecker
name = "variables"
msgs = MSGS
priority = -1
options = (
(
"init-import",
{
"default": 0,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether we should check for unused import in "
"__init__ files.",
},
),
(
"dummy-variables-rgx",
{
"default": "_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_",
"type": "regexp",
"metavar": "<regexp>",
"help": "A regular expression matching the name of dummy "
"variables (i.e. expected to not be used).",
},
),
(
"additional-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of additional names supposed to be defined in "
"builtins. Remember that you should avoid defining new builtins "
"when possible.",
},
),
(
"callbacks",
{
"default": ("cb_", "_cb"),
"type": "csv",
"metavar": "<callbacks>",
"help": "List of strings which can identify a callback "
"function by name. A callback name must start or "
"end with one of those strings.",
},
),
(
"redefining-builtins-modules",
{
"default": (
"six.moves",
"past.builtins",
"future.builtins",
"builtins",
"io",
),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of qualified module names which can have objects "
"that can redefine builtins.",
},
),
(
"ignored-argument-names",
{
"default": IGNORED_ARGUMENT_NAMES,
"type": "regexp",
"metavar": "<regexp>",
"help": "Argument names that match this expression will be "
"ignored. Default to name with leading underscore.",
},
),
(
"allow-global-unused-variables",
{
"default": True,
"type": "yn",
"metavar": "<y or n>",
"help": "Tells whether unused global variables should be treated as a violation.",
},
),
(
"allowed-redefined-builtins",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list>",
"help": "List of names allowed to shadow builtins",
},
),
)
def __init__(self, linter=None):
super().__init__(linter)
self._to_consume: List[NamesConsumer] = []
self._checking_mod_attr = None
self._loop_variables = []
self._type_annotation_names = []
self._except_handler_names_queue: List[
Tuple[nodes.ExceptHandler, nodes.AssignName]
] = []
"""This is a queue, last in first out."""
self._postponed_evaluation_enabled = False
def open(self) -> None:
"""Called when loading the checker."""
self._is_undefined_variable_enabled = self.linter.is_message_enabled(
"undefined-variable"
)
self._is_used_before_assignment_enabled = self.linter.is_message_enabled(
"used-before-assignment"
)
self._is_undefined_loop_variable_enabled = self.linter.is_message_enabled(
"undefined-loop-variable"
)
@utils.check_messages("redefined-outer-name")
def visit_for(self, node: nodes.For) -> None:
assigned_to = [a.name for a in node.target.nodes_of_class(nodes.AssignName)]
# Only check variables that are used
dummy_rgx = self.config.dummy_variables_rgx
assigned_to = [var for var in assigned_to if not dummy_rgx.match(var)]
for variable in assigned_to:
for outer_for, outer_variables in self._loop_variables:
if variable in outer_variables and not in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-outer-name",
args=(variable, outer_for.fromlineno),
node=node,
)
break
self._loop_variables.append((node, assigned_to))
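# Example of the pattern flagged above (illustrative sketch only):
#
#     for item in outer_items:
#         for item in inner_items:   # redefined-outer-name: `item` shadows
#             ...                    # the outer loop variable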
@utils.check_messages("redefined-outer-name")
def leave_for(self, node: nodes.For) -> None:
self._loop_variables.pop()
self._store_type_annotation_names(node)
def visit_module(self, node: nodes.Module) -> None:
"""Visit module : update consumption analysis variable
checks globals doesn't overrides builtins
"""
self._to_consume = [NamesConsumer(node, "module")]
self._postponed_evaluation_enabled = is_postponed_evaluation_enabled(node)
for name, stmts in node.locals.items():
if utils.is_builtin(name):
if self._should_ignore_redefined_builtin(stmts[0]) or name == "__doc__":
continue
self.add_message("redefined-builtin", args=name, node=stmts[0])
@utils.check_messages(
"unused-import",
"unused-wildcard-import",
"redefined-builtin",
"undefined-all-variable",
"invalid-all-object",
"invalid-all-format",
"unused-variable",
)
def leave_module(self, node: nodes.Module) -> None:
"""Leave module: check globals."""
assert len(self._to_consume) == 1
self._check_metaclasses(node)
not_consumed = self._to_consume.pop().to_consume
# attempt to check for __all__ if defined
if "__all__" in node.locals:
self._check_all(node, not_consumed)
# check for unused globals
self._check_globals(not_consumed)
# don't check unused imports in __init__ files
if not self.config.init_import and node.package:
return
self._check_imports(not_consumed)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""Visit class: update consumption analysis variable."""
self._to_consume.append(NamesConsumer(node, "class"))
| |
"""
Implements an asynchronous interface for a Frontier Silicon device.
For example internet radios from: Medion, Hama, Auna, ...
"""
import asyncio
from asyncio.exceptions import TimeoutError
import typing as t
import logging
from afsapi.exceptions import (
FSApiException,
InvalidPinException,
InvalidSessionException,
NotImplementedException,
OutOfRangeException,
ConnectionError,
)
from afsapi.models import Preset, Equaliser, PlayerMode, PlayControl, PlayState
from afsapi.throttler import Throttler
from afsapi.utils import unpack_xml, maybe
from enum import Enum
import aiohttp
import xml.etree.ElementTree as ET
DataItem = t.Union[str, int]
DEFAULT_TIMEOUT_IN_SECONDS = 15
TIME_AFTER_READ_CALLS_IN_SECONDS = 0
TIME_AFTER_SET_CALLS_IN_SECONDS = 0.3
TIME_AFTER_SLOW_SET_CALLS_IN_SECONDS = 1.0
FSApiValueType = Enum("FSApiValueType", "TEXT BOOL INT LONG SIGNED_LONG")
VALUE_TYPE_TO_XML_PATH = {
FSApiValueType.TEXT: "c8_array",
FSApiValueType.INT: "u8",
FSApiValueType.LONG: "u32",
FSApiValueType.SIGNED_LONG: "s32",
}
READ_ONLY = False
READ_WRITE = True
# implemented API calls
API = {
# sys
"power": "netRemote.sys.power",
"mode": "netRemote.sys.mode",
# sys.info
"friendly_name": "netRemote.sys.info.friendlyName",
"radio_id": "netRemote.sys.info.radioId",
"version": "netRemote.sys.info.version",
# sys.caps
"valid_modes": "netRemote.sys.caps.validModes",
"equalisers": "netRemote.sys.caps.eqPresets",
"sleep": "netRemote.sys.sleep",
# sys.audio
"eqpreset": "netRemote.sys.audio.eqpreset",
"eqloudness": "netRemote.sys.audio.eqloudness",
"bass": "netRemote.sys.audio.eqcustom.param0",
"treble": "netRemote.sys.audio.eqcustom.param1",
# volume
"volume_steps": "netRemote.sys.caps.volumeSteps",
"volume": "netRemote.sys.audio.volume",
"mute": "netRemote.sys.audio.mute",
# play
"status": "netRemote.play.status",
"name": "netRemote.play.info.name",
"control": "netRemote.play.control",
"shuffle": "netRemote.play.shuffle",
"repeat": "netRemote.play.repeat",
"position": "netRemote.play.position",
"rate": "netRemote.play.rate",
# info
"text": "netRemote.play.info.text",
"artist": "netRemote.play.info.artist",
"album": "netRemote.play.info.album",
"graphic_uri": "netRemote.play.info.graphicUri",
"duration": "netRemote.play.info.duration",
# nav
"nav_state": "netRemote.nav.state",
"numitems": "netRemote.nav.numitems",
"nav_list": "netRemote.nav.list",
"navigate": "netRemote.nav.action.navigate",
"selectItem": "netRemote.nav.action.selectItem",
"presets": "netRemote.nav.presets",
"selectPreset": "netRemote.nav.action.selectPreset",
}
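# The values above are FSAPI node names; AFSAPI builds GET/<node>, SET/<node>
# and LIST_GET_NEXT/<node> requests against the device's webfsapi endpoint
# (see the handler methods below).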
LOGGER = logging.getLogger(__name__)
# pylint: disable=R0904
class AFSAPI:
"""Builds the interface to a Frontier Silicon device."""
def __init__(
self,
webfsapi_endpoint: str,
pin: t.Union[str, int],
timeout: int = DEFAULT_TIMEOUT_IN_SECONDS,
):
"""Initialize the Frontier Silicon device."""
self.webfsapi_endpoint = webfsapi_endpoint
self.pin = str(pin)
self.timeout = timeout
self.sid: t.Optional[str] = None
self.__volume_steps: t.Optional[int] = None
self.__modes = None
self.__equalisers = None
self._current_nav_path: list[int] = []
self.__throttler = Throttler()
@staticmethod
async def get_webfsapi_endpoint(
fsapi_device_url: str, timeout: int = DEFAULT_TIMEOUT_IN_SECONDS
) -> str:
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(force_close=True),
timeout=aiohttp.ClientTimeout(total=timeout),
) as client:
try:
resp = await client.get(fsapi_device_url)
doc = ET.fromstring(await resp.text(encoding="utf-8"))
api = doc.find("webfsapi")
if api is not None and api.text:
return api.text
else:
raise FSApiException(
f"Could not retrieve webfsapi endpoint from {fsapi_device_url}"
)
except (aiohttp.ServerTimeoutError, asyncio.TimeoutError):
raise ConnectionError(
f"Did not get a response in time from {fsapi_device_url}"
)
except aiohttp.ClientConnectionError:
raise ConnectionError(f"Could not connect to {fsapi_device_url}")
@staticmethod
async def create(
fsapi_device_url: str,
pin: t.Union[str, int],
timeout: int = DEFAULT_TIMEOUT_IN_SECONDS,
) -> "AFSAPI":
webfsapi_endpoint = await AFSAPI.get_webfsapi_endpoint(
fsapi_device_url, timeout
)
return AFSAPI(webfsapi_endpoint, pin, timeout)
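# Example usage (illustrative sketch only; the device URL and PIN below are
# assumptions, not values defined by this library):
#
#     async def main():
#         afsapi = await AFSAPI.create("http://192.168.1.25:80/device", pin=1234)
#         print(await afsapi.get_friendly_name())
#         print(await afsapi.get_power())
#
#     asyncio.run(main())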
# http request helpers
async def _create_session(self) -> t.Optional[str]:
return unpack_xml(
await self.__call("CREATE_SESSION", retry_with_session=False), "sessionId"
)
async def __call(
self,
path: str,
extra: t.Optional[t.Dict[str, DataItem]] = None,
force_new_session: bool = False,
retry_with_session: bool = True,
throttle_wait_after_call: float = TIME_AFTER_READ_CALLS_IN_SECONDS,
) -> ET.Element:
"""Execute a frontier silicon API call."""
params: t.Dict[str, DataItem] = dict(pin=self.pin)
if force_new_session:
self.sid = await self._create_session()
if self.sid:
params.update(sid=self.sid)
if extra:
params.update(**extra)
async with aiohttp.ClientSession(
connector=aiohttp.TCPConnector(force_close=True),
timeout=aiohttp.ClientTimeout(total=self.timeout),
) as client:
try:
async with self.__throttler.throttle(throttle_wait_after_call):
result = await client.get(
f"{self.webfsapi_endpoint}/{path}", params=params
)
LOGGER.debug(f"Called {path} with {params}: {result.status}")
if result.status == 403:
raise InvalidPinException("Access denied - incorrect PIN")
elif result.status == 404:
# Bad session ID or service endpoint
logging.warning(
f"Service call failed with 404 to {self.webfsapi_endpoint}/{path}"
)
if not force_new_session and retry_with_session:
# retry command with a forced new session
return await self.__call(path, extra, force_new_session=True)
else:
raise InvalidSessionException(
"Wrong session-id or invalid command"
)
elif result.status != 200:
raise FSApiException(
f"Unexpected result {result.status}: {await result.text()}"
)
doc = ET.fromstring(await result.text(encoding="utf-8"))
status = unpack_xml(doc, "status")
if status == "FS_OK" or status == "FS_LIST_END":
return doc
elif status == "FS_NODE_DOES_NOT_EXIST":
raise NotImplementedException(
f"FSAPI service {path} not implemented at {self.webfsapi_endpoint}."
)
elif status == "FS_NODE_BLOCKED":
raise FSApiException("Device is not in the correct mode")
elif status == "FS_FAIL":
raise OutOfRangeException(
"Command failed. Value is not in range for this command."
)
elif status == "FS_PACKET_BAD":
raise FSApiException("This command can't be SET")
logging.error(f"Unexpected FSAPI status {status}")
raise FSApiException(f"Unexpected FSAPI status '{status}'")
except aiohttp.ClientConnectionError:
raise ConnectionError(f"Could not connect to {self.webfsapi_endpoint}")
except TimeoutError:
if not force_new_session and retry_with_session:
return await self.__call(path, extra, force_new_session=True)
else:
raise ConnectionError(
f"{self.webfsapi_endpoint} did not respond within {self.timeout} seconds"
)
# Helper methods
# Handlers
async def handle_get(self, item: str) -> ET.Element:
return await self.__call(f"GET/{item}")
async def handle_set(
self,
item: str,
value: t.Any,
throttle_wait_after_call: float = TIME_AFTER_SET_CALLS_IN_SECONDS,
) -> t.Optional[bool]:
status = unpack_xml(
await self.__call(
f"SET/{item}",
dict(value=value),
throttle_wait_after_call=throttle_wait_after_call,
),
"status",
)
return maybe(status, lambda x: x == "FS_OK")
async def handle_text(self, item: str) -> t.Optional[str]:
return unpack_xml(await self.handle_get(item), "value/c8_array")
async def handle_int(self, item: str) -> t.Optional[int]:
val = unpack_xml(await self.handle_get(item), "value/u8")
return maybe(val, int)
# handle_int reads a u8 node, so the returned value is assumed to fit in 8 bits
async def handle_long(self, item: str) -> t.Optional[int]:
val = unpack_xml(await self.handle_get(item), "value/u32")
return maybe(val, int)
async def handle_signed_long(
self,
item: str,
) -> t.Optional[int]:
val = unpack_xml(await self.handle_get(item), "value/s32")
return maybe(val, int)
async def handle_list(
self, list_name: str
) -> t.AsyncIterable[t.Tuple[str, t.Dict[str, t.Optional[DataItem]]]]:
def _handle_item(
item: ET.Element,
) -> t.Tuple[str, t.Dict[str, t.Optional[DataItem]]]:
key = item.attrib["key"]
def _handle_field(field: ET.Element) -> t.Tuple[str, t.Optional[DataItem]]:
# TODO: Handle other field types
if "name" in field.attrib:
id = field.attrib["name"]
s = unpack_xml(field, "c8_array")
v = maybe(unpack_xml(field, "u8"), int)
return (id, s or v)
raise ValueError("Invalid field")
value = dict(map(_handle_field, item.findall("field")))
return key, value
async def _get_next_items(
start: int, count: int
) -> t.Tuple[list[ET.Element], bool]:
try:
doc = await self.__call(
f"LIST_GET_NEXT/{list_name}/{start}", {"maxItems": count}
)
if doc and unpack_xml(doc, "status") == "FS_OK":
return doc.findall("item"), doc.find("listend") is not None
else:
return [], True
except OutOfRangeException:
return [], True
start = -1
count = 50 # asking for more items at once increases the chance of FS_NODE_BLOCKED errors on subsequent requests
has_next = True
while has_next:
items, end_reached = await _get_next_items(start, count)
for item in items:
yield _handle_item(item)
start += count
if end_reached:
has_next = False
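# handle_list is an async generator; it is normally consumed with `async for`,
# e.g. (sketch):
#
#     async for key, fields in afsapi.handle_list(API["valid_modes"]):
#         print(key, fields)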
# sys
async def get_friendly_name(self) -> t.Optional[str]:
"""Get the friendly name of the device."""
return await self.handle_text(API["friendly_name"])
async def set_friendly_name(self, value: str) -> t.Optional[bool]:
"""Set the friendly name of the device."""
return await self.handle_set(API["friendly_name"], value)
async def get_version(self) -> t.Optional[str]:
"""Get the friendly name of the device."""
return await self.handle_text(API["version"])
async def get_radio_id(self) -> t.Optional[str]:
"""Get the friendly name of the device."""
return await self.handle_text(API["radio_id"])
async def get_power(self) -> t.Optional[bool]:
"""Check if the device is on."""
power = await self.handle_int(API["power"])
return bool(power)
async def set_power(self, value: bool = False) -> t.Optional[bool]:
"""Power on or off the device."""
power = await self.handle_set(
API["power"],
int(value),
throttle_wait_after_call=TIME_AFTER_SLOW_SET_CALLS_IN_SECONDS,
)
return bool(power)
async def get_volume_steps(self) -> t.Optional[int]:
"""Read the maximum volume level of the device."""
if not self.__volume_steps:
self.__volume_steps = await self.handle_int(API["volume_steps"])
return self.__volume_steps
# Volume
async def get_volume(self) -> t.Optional[int]:
"""Read the volume level of the device."""
return await self.handle_int(API["volume"])
async def set_volume(self, value: int) -> t.Optional[bool]:
"""Set the volume level of the device."""
return await self.handle_set(API["volume"], value)
# Mute
async def get_mute(self) -> t.Optional[bool]:
"""Check if the device is muted."""
mute = await self.handle_int(API["mute"])
return bool(mute)
async def set_mute(self, value: bool = False) -> t.Optional[bool]:
"""Mute or unmute the device."""
mute = await self.handle_set(API["mute"], int(value))
return bool(mute)
async def get_play_status(self) -> t.Optional[PlayState]:
"""Get the play status of the device."""
status = await self.handle_int(API["status"])
if status:
return PlayState(status)
else:
return None
async def get_play_name(self) -> t.Optional[str]:
"""Get the name of the played item."""
return await self.handle_text(API["name"])
async def get_play_text(self) -> t.Optional[str]:
"""Get the text associated with the played media."""
return await self.handle_text(API["text"])
async def get_play_artist(self) -> t.Optional[str]:
"""Get the artists of the current media(song)."""
return await self.handle_text(API["artist"])
async def get_play_album(self) -> t.Optional[str]:
"""Get the songs's album."""
return await self.handle_text(API["album"])
async def get_play_graphic(self) -> t.Optional[str]:
"""Get the album art associated with the song/album/artist."""
return await self.handle_text(API["graphic_uri"])
# Shuffle
async def get_play_shuffle(self) -> t.Optional[bool]:
status = await self.handle_int(API["shuffle"])
if status:
return status == 1
return None
async def set_play_shuffle(self, value: bool) -> t.Optional[bool]:
return await self.handle_set(API["shuffle"], int(value))
# Repeat
async def get_play_repeat(self) -> t.Optional[bool]:
status = await self.handle_int(API["repeat"])
if status:
return status == 1
return None
async def play_repeat(self, value: bool) -> t.Optional[bool]:
return await self.handle_set(API["repeat"], int(value))
async def get_play_duration(self) -> t.Optional[int]:
"""Get the duration of the played media."""
return await self.handle_long(API["duration"])
async def get_play_position(self) -> t.Optional[int]:
"""
The user can jump to a specific moment of the track. This means that the range of
# R[i, j] = 0.125 * np.sum([.5 * G[i-2, j],\
# -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
# -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i,j], 4. * R[i, j+1], -1. * G[i, j+2], \
# -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
# .5 * G[i+2, j]])
#
# # B at Green locations in Red rows
# B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
# -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
# .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
# -1. * G[i+1, j-1], 4. * B[i+1,j], -1. * G[i+1, j+1], \
# -1. * G[i+2, j]])
#
# # Green locations in Blue rows
# elif (((i % 2) == 0) and ((j % 2) == 0)):
#
# # R at Green locations in Blue rows
# R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
# -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
# .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
# -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
# -1. * G[i+2, j]])
#
# # B at Green locations in Blue rows
# B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
# -1. * G [i-1, j-1], -1. * G[i-1, j+1], \
# -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i,j], 4. * B[i, j+1], -1. * G[i, j+2], \
# -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
# .5 * G[i+2, j]])
#
# # R at Blue locations
# elif (((i % 2) == 0) and ((j % 2) != 0)):
# R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
# 2. * R[i-1, j-1], 2. * R[i-1, j+1], \
# -1.5 * B[i, j-2], 6. * B[i,j], -1.5 * B[i, j+2], \
# 2. * R[i+1, j-1], 2. * R[i+1, j+1], \
# -1.5 * B[i+2, j]])
#
# # B at Red locations
# elif (((i % 2) != 0) and ((j % 2) == 0)):
# B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
# 2. * B[i-1, j-1], 2. * B[i-1, j+1], \
# -1.5 * R[i, j-2], 6. * R[i,j], -1.5 * R[i, j+2], \
# 2. * B[i+1, j-1], 2. * B[i+1, j+1], \
# -1.5 * R[i+2, j]])
#
# if (timeshow):
# elapsed_time = time.process_time() - t0
# print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
# " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
#
# elif (bayer_pattern == "grbg"):
#
# G[::2, ::2] = raw[::2, ::2]
# G[1::2, 1::2] = raw[1::2, 1::2]
# R[::2, 1::2] = raw[::2, 1::2]
# B[1::2, ::2] = raw[1::2, ::2]
#
# # Green channel
# for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
#
# # to display progress
# t0 = time.process_time()
#
# for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
#
# # G at Red location
# if (((i % 2) == 0) and ((j % 2) != 0)):
# G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
# 2. * G[i-1, j], \
# -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i,j], 2. * G[i, j+1], -1. * R[i, j+2],\
# 2. * G[i+1, j], \
# -1. * R[i+2, j]])
# # G at Blue location
# elif (((i % 2) != 0) and ((j % 2) == 0)):
# G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
# 2. * G[i-1, j], \
# -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i,j], 2. * G[i, j+1], -1. * B[i, j+2], \
# 2. * G[i+1, j],\
# -1. * B[i+2, j]])
# if (timeshow):
# elapsed_time = time.process_time() - t0
# print("Green: row index: " + str(i-1) + " of " + str(height) + \
# " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
#
# # Red and Blue channel
# for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
#
# # to display progress
# t0 = time.process_time()
#
# for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
#
# # Green locations in Red rows
# if (((i % 2) == 0) and ((j % 2) == 0)):
# # R at Green locations in Red rows
# R[i, j] = 0.125 * np.sum([.5 * G[i-2, j],\
# -1. * G[i-1, j-1], -1. * G[i-1, j+1], \
# -1. * G[i, j-2], 4. * R[i, j-1], 5. * G[i,j], 4. * R[i, j+1], -1. * G[i, j+2], \
# -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
# .5 * G[i+2, j]])
#
# # B at Green locations in Red rows
# B[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
# -1. * G[i-1, j-1], 4. * B[i-1, j], -1. * G[i-1, j+1], \
# .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
# -1. * G[i+1, j-1], 4. * B[i+1,j], -1. * G[i+1, j+1], \
# -1. * G[i+2, j]])
#
# # Green locations in Blue rows
# elif (((i % 2) != 0) and ((j % 2) != 0)):
#
# # R at Green locations in Blue rows
# R[i, j] = 0.125 * np.sum([-1. * G[i-2, j], \
# -1. * G[i-1, j-1], 4. * R[i-1, j], -1. * G[i-1, j+1], \
# .5 * G[i, j-2], 5. * G[i,j], .5 * G[i, j+2], \
# -1. * G[i+1, j-1], 4. * R[i+1, j], -1. * G[i+1, j+1], \
# -1. * G[i+2, j]])
#
# # B at Green locations in Blue rows
# B[i, j] = 0.125 * np.sum([.5 * G[i-2, j], \
# -1. * G [i-1, j-1], -1. * G[i-1, j+1], \
# -1. * G[i, j-2], 4. * B[i, j-1], 5. * G[i,j], 4. * B[i, j+1], -1. * G[i, j+2], \
# -1. * G[i+1, j-1], -1. * G[i+1, j+1], \
# .5 * G[i+2, j]])
#
# # R at Blue locations
# elif (((i % 2) != 0) and ((j % 2) == 0)):
# R[i, j] = 0.125 * np.sum([-1.5 * B[i-2, j], \
# 2. * R[i-1, j-1], 2. * R[i-1, j+1], \
# -1.5 * B[i, j-2], 6. * B[i,j], -1.5 * B[i, j+2], \
# 2. * R[i+1, j-1], 2. * R[i+1, j+1], \
# -1.5 * B[i+2, j]])
#
# # B at Red locations
# elif (((i % 2) == 0) and ((j % 2) != 0)):
# B[i, j] = 0.125 * np.sum([-1.5 * R[i-2, j], \
# 2. * B[i-1, j-1], 2. * B[i-1, j+1], \
# -1.5 * R[i, j-2], 6. * R[i,j], -1.5 * R[i, j+2], \
# 2. * B[i+1, j-1], 2. * B[i+1, j+1], \
# -1.5 * R[i+2, j]])
#
# if (timeshow):
# elapsed_time = time.process_time() - t0
# print("Red/Blue: row index: " + str(i-1) + " of " + str(height) + \
# " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
#
# elif (bayer_pattern == "bggr"):
#
# G[::2, 1::2] = raw[::2, 1::2]
# G[1::2, ::2] = raw[1::2, ::2]
# R[1::2, 1::2] = raw[1::2, 1::2]
# B[::2, ::2] = raw[::2, ::2]
#
# # Green channel
# for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
#
# # to display progress
# t0 = time.process_time()
#
# for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
#
# # G at Red location
# if (((i % 2) != 0) and ((j % 2) != 0)):
# G[i, j] = 0.125 * np.sum([-1. * R[i-2, j], \
# 2. * G[i-1, j], \
# -1. * R[i, j-2], 2. * G[i, j-1], 4. * R[i,j], 2. * G[i, j+1], -1. * R[i, j+2],\
# 2. * G[i+1, j], \
# -1. * R[i+2, j]])
# # G at Blue location
# elif (((i % 2) == 0) and ((j % 2) == 0)):
# G[i, j] = 0.125 * np.sum([-1. * B[i-2, j], \
# 2. * G[i-1, j], \
# -1. * B[i, j-2], 2. * G[i, j-1], 4. * B[i,j], 2. * G[i, j+1], -1. * B[i, j+2], \
# 2. * G[i+1, j],\
# -1. * B[i+2, j]])
# if (timeshow):
# elapsed_time = time.process_time() - t0
# print("Green: row index: " + str(i-1) + " of " + str(height) + \
# " | elapsed time: " + "{:.3f}".format(elapsed_time) + " seconds")
#
# # Red and Blue channel
# for i in range(no_of_pixel_pad, height + no_of_pixel_pad):
#
# # to display progress
# t0 = time.process_time()
#
# for j in range(no_of_pixel_pad, width + no_of_pixel_pad):
#
#
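# Note added for clarity: the commented-out interpolation kernels above appear
# to follow the Malvar-He-Cutler high-quality linear demosaicing coefficients
# (0.125-scaled stencils such as 5/4/-1/0.5 and 6/2/-1.5), applied per Bayer
# pattern; this identification is an inference, not stated in the original.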
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [150, 180, 200, 160],
# Time Zone. In hours added to UTC (may be negative).
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] != data[band].attrs['nodata']),
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [],
"legend": {
# "url": ""
"styles": ["seasonal_WOfS_frequency",
"seasonal_WOfS_frequency_blues_transparent"]
},
"wcs_default_bands": ["frequency"],
"styles": [
{
"name": "seasonal_WOfS_frequency",
"title": " Water Summary",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.02,
"color": "#000000",
"alpha": 0.0
},
{
"value": 0.05,
"color": "#8e0101",
"alpha": 0.25
},
{
"value": 0.1,
"color": "#cf2200",
"alpha": 0.75
},
{
"value": 0.2,
"color": "#e38400"
},
{
"value": 0.3,
"color": "#e3df00"
},
{
"value": 0.4,
"color": "#62e300"
},
{
"value": 0.5,
"color": "#00e32d"
},
{
"value": 0.6,
"color": "#00e3c8"
},
{
"value": 0.7,
"color": "#0097e3"
},
{
"value": 0.8,
"color": "#005fe3"
},
{
"value": 0.9,
"color": "#000fe3"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
{
"name": "seasonal_WOfS_frequency_blues_transparent",
"title": "Water Summary (Blue)",
"abstract": "WOfS seasonal summary showing the frequency of Wetness",
"needed_bands": ["frequency"],
"color_ramp": [
{
"value": 0.0,
"color": "#ffffff",
"alpha": 0.0,
},
{
"value": 0.001,
"color": "#d5fef9",
"alpha": 0.0,
},
{
"value": 0.02,
"color": "#d5fef9",
},
{
"value": 0.2,
"color": "#71e3ff"
},
{
"value": 0.4,
"color": "#01ccff"
},
{
"value": 0.6,
"color": "#0178ff"
},
{
"value": 0.8,
"color": "#2701ff"
},
{
"value": 1.0,
"color": "#5700e3"
}
],
"legend": {
"units": "%",
"radix_point": 0,
"scale_by": 100.0,
"major_ticks": 0.1
}
},
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "seasonal_WOfS_frequency",
},
{
# Included as a keyword for the layer
"label": "WOfS Daily Observations",
# Included as a keyword for the layer
"type": "albers",
# Included as a keyword for the layer
"variant": "25m",
# The WMS name for the layer
"name": "wofs_albers",
# The Datacube name for the associated data product
"product_name": "wofs_albers",
"abstract": """
Water Observations from Space (WOfS) provides surface water observations derived from satellite imagery for all of Australia. The current product (Version 2.1.5) includes observations taken from 1986 to the present, from the Landsat 5, 7 and 8 satellites. WOfS covers all of mainland Australia and Tasmania but excludes off-shore Territories.
The WOfS product allows users to get a better understanding of where water is normally present in a landscape, where water is seldom observed, and where inundation has occurred occasionally.
Data is provided as Water Observation Feature Layers (WOFLs), in a 1 to 1 relationship with the input satellite data. Hence there is one WOFL for each satellite dataset processed for the occurrence of water. The details of the WOfS algorithm and derived statistics are available at http://dx.doi.org/10.1016/j.rse.2015.11.003.
For service status information, see https://status.dea.ga.gov.au""",
#"pq_band": "water",
# Min zoom factor - sets the zoom level where the cutover from indicative polygons
# to actual imagery occurs.
"min_zoom_factor": 35.0,
# The fill-colour of the indicative polygons when zoomed out.
# Triplets (rgb) or quadruplets (rgba) of integers 0-255.
"zoomed_out_fill_colour": [200, 180, 180, 160],
# Time Zone. In hours added to UTC (may be negative).
# Used for rounding off scene times to a date.
# 9 is a good value for imagery of Australia.
"time_zone": 9,
# Extent mask function
# Determines which portions of the dataset are potentially meaningful data.
"extent_mask_func": lambda data, band: (data[band] & data[band].attrs['nodata']) == 0,
# "pq_manual_merge": True,
# Flags listed here are ignored in GetFeatureInfo requests.
# (defaults to empty list)
"ignore_info_flags": [
"nodata",
"noncontiguous",
],
# Include UTC dates for GSKY lookup
"feature_info_include_utc_dates": True,
"data_manual_merge": False,
"always_fetch_bands": [ ],
"apply_solar_corrections": False,
"fuse_func": "datacube_wms.wms_utils.wofls_fuser",
# A function that extracts the "sub-product" id (e.g. path number) from a dataset. Function should return a (small) integer
# If None or not specified, the product has no sub-layers.
# "sub_product_extractor": lambda ds: int(s3_path_pattern.search(ds.uris[0]).group("path")),
# A prefix used to describe the sub-layer in the GetCapabilities response.
# E.g. sub-layer 109 will be described as "Landsat Path 109"
# "sub_product_label": "Landsat Path",
# Bands to include in time-dimension "pixel drill".
# Don't activate in production unless you really know what you're doing.
# "band_drill": ["nir", "red", "green", "blue"],
# Styles.
#
# See band_mapper.py
#
# The various available spectral bands, and ways to combine them
# into a single rgb image.
# The examples here are ad hoc
#
"legend": {
"styles": ["observations"]
},
"wcs_default_bands": ["water"],
"styles": [
{
"name": "observations",
"title": "Observations",
"abstract": "Observations",
"value_map": {
"water": [
{
"title": "Invalid",
"abstract": "Slope or Cloud",
"flags": {
"or": {
"terrain_or_low_angle": True,
"cloud_shadow": True,
"cloud": True,
"high_slope": True,
"noncontiguous": True
}
},
"color": "#707070"
},
{
# Possible Sea Glint, also mark as invalid
"title": "",
"abstract": "",
"flags": {
"dry": True,
"sea": True
},
"color": "#707070"
},
{
"title": "Dry",
"abstract": "Dry",
"flags": {
"dry": True,
"sea": False,
},
"color": "#D99694"
},
{
"title": "Wet",
"abstract": "Wet or Sea",
"flags": {
"or": {
"wet": True,
"sea": True
}
},
"color": "#4F81BD"
}
]
}
},
{
"name": "wet",
"title": "Wet Only",
"abstract": "Wet Only",
"value_map": {
"water": [
{
"title": "Invalid",
"abstract": "Slope or Cloud",
"flags": {
"or": {
"terrain_or_low_angle": True,
"cloud_shadow": True,
"cloud": True,
"high_slope": True,
"noncontiguous": True
}
},
"color": "#707070",
"mask": True
},
{
# Possible Sea Glint, also mark as invalid
"title": "",
"abstract": "",
"flags": {
"dry": True,
"sea": True
},
"color": "#707070",
"mask": True
},
{
"title": "Dry",
"abstract": "Dry",
"flags": {
"dry": True,
"sea": False,
},
"color": "#D99694",
"mask": True
},
{
"title": "Wet",
"abstract": "Wet or Sea",
"flags": {
"or": {
"wet": True,
"sea": True
}
},
"color": "#4F81BD"
}
]
}
}
],
# Default style (if request does not specify style)
# MUST be defined in the styles list above.
# (Looks like Terria assumes this is the first style in the list, but this is
# not required by the standard.)
"default_style": "observations",
}
],
},
{
# Name and title of the platform layer.
# Platform layers are not mappable. The name is for internal server use only.
"name": "Sentinel-2 NRT",
"title": "Near Real-Time",
"abstract": "This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. "
"The Near Real-Time capability provides analysis-ready data "
"that is processed on receipt using the best-available ancillary information at the time to "
"provide atmospheric corrections. For more information see "
"http://pid.geoscience.gov.au/dataset/ga/122229",
# Products available for this platform.
# For each product, the "name" is the Datacube name, and the "label" is used
# to describe the product to end-users.
"products": [
{
# Included as a keyword for the layer
"label": "Sentinel 2 (A and B combined)",
# Included as a keyword for the layer
"type": "",
# Included as a keyword for the layer
"variant": "Surface Reflectance",
"abstract":"""
This is a 90-day rolling archive of daily Sentinel-2 Near Real Time data. The Near Real-Time capability provides analysis-ready data that is processed on receipt using the best-available ancillary information at the time to provide atmospheric corrections.
For more information see http://pid.geoscience.gov.au/dataset/ga/122229
The Normalised Difference Chlorophyll Index (NDCI) is based on the method of Mishra & Mishra 2012, and adapted to bands on the Sentinel-2A & B sensors.
The index indicates levels of chlorophyll-a (chl-a) concentrations in complex turbid productive waters such as those encountered in many
# File: moirai/webapi/api.py
# -*- coding: utf-8; -*-
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import ahio
import io
import zipfile
import hashlib
import json
import dateutil.parser
import logging
import os.path
import tempfile
import scipy.io as sio
from multiprocessing import Pipe
from bson import json_util
from enum import Enum
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher
from flask import Flask, request, send_file
from moirai.database import DatabaseV1
from moirai.hardware import Hardware
from moirai import __version__
class APIv1:
"""
Starts a WebServer for the API endpoint.
"""
def __init__(self, processHandler):
self.app = Flask(__name__)
self.database = DatabaseV1()
self.hardware = Hardware()
self.ph = processHandler
logging.getLogger('werkzeug').setLevel(logging.ERROR)
def run(self):
"""
Entry point for the class. Adds routes and starts listening.
"""
@self.app.after_request
def add_header(response):
response.headers['Cache-Control'] = 'no-store'
return response
self.app.add_url_rule('/', view_func=lambda: 'Moirai Control System\n')
self.app.add_url_rule('/login', view_func=self.login, methods=['POST'])
self.app.add_url_rule(
'/version', view_func=self.version, methods=['GET'])
self.app.add_url_rule(
'/set-password', view_func=self.set_password, methods=['POST'])
self.app.add_url_rule(
'/last_error', view_func=self.last_error, methods=['GET'])
self.app.add_url_rule(
'/hardware/drivers',
view_func=self.hardware_drivers,
methods=['GET'])
self.app.add_url_rule(
'/hardware/configuration',
view_func=self.hardware_set_configuration,
methods=['POST'])
self.app.add_url_rule(
'/hardware/configuration',
view_func=self.hardware_get_configuration,
methods=['GET'])
self.app.add_url_rule(
'/system_response/tests',
view_func=self.system_response_get_tests,
methods=['GET'])
self.app.add_url_rule(
'/system_response/tests',
view_func=self.system_response_set_tests,
methods=['POST'])
self.app.add_url_rule(
'/system_response/test/run',
view_func=self.system_response_run,
methods=['POST'])
self.app.add_url_rule(
'/system_response/test/stop',
view_func=self.system_response_stop,
methods=['GET'])
self.app.add_url_rule(
'/live_graph/tests',
view_func=self.live_graph_list_tests,
methods=['GET'])
self.app.add_url_rule(
'/live_graph/test',
view_func=self.live_graph_get_test,
methods=['POST'])
self.app.add_url_rule(
'/live_graph/test/remove',
view_func=self.live_graph_remove_test,
methods=['POST'])
self.app.add_url_rule(
'/live_graph/test/export',
view_func=self.live_graph_export_mat,
methods=['POST'])
self.app.add_url_rule(
'/controllers', view_func=self.controller_set, methods=['POST'])
self.app.add_url_rule(
'/controllers', view_func=self.controller_get, methods=['GET'])
self.app.add_url_rule(
'/controllers/run',
view_func=self.controller_run,
methods=['POST'])
self.app.add_url_rule(
'/controllers/export',
view_func=self.controller_export,
methods=['POST'])
self.app.add_url_rule(
'/controllers/import',
view_func=self.controller_import,
methods=['POST'])
self.app.add_url_rule(
'/controllers/stop',
view_func=self.controller_stop,
methods=['GET'])
self.app.add_url_rule(
'/db/dump', view_func=self.dump_database, methods=['GET'])
self.app.add_url_rule(
'/db/restore', view_func=self.restore_database, methods=['POST'])
self.app.add_url_rule(
'/simulation/run',
view_func=self.model_simulation_run,
methods=['POST'])
self.app.add_url_rule(
'/pid/run', view_func=self.pid_run, methods=['POST'])
self.app.add_url_rule(
'/free/run', view_func=self.free_run, methods=['POST'])
d = PathInfoDispatcher({'/': self.app})
self.server = Server(('0.0.0.0', 5000), d)
self.server.start()
def stop(self):
self.server.stop()
def version(self):
return json.dumps({'version': __version__})
def verify_token(self):
"""
Verifies the token sent as a HTTP Authorization header.
"""
try:
authorization = request.headers.get('Authorization')
token = authorization.split(' ')[-1]
return self.database.verify_token(token)
except Exception: # noqa: E722 pylint: disable=E722
return False
def login(self):
"""
Authenticates the user.
Should be called with a POST request containing the following body:
{
"password": string
}
@returns:
On success, HTTP 200 Ok and body:
{
"token": string
}
On failure, HTTP 403 Unauthorized and body:
{}
"""
password = request.json.get('password', '')
hasher = hashlib.sha512()
hasher.update(bytes(password, 'utf-8'))
password = hasher.hexdigest()
saved_password = self.database.get_setting('password')
if saved_password == password:
return json.dumps({'token': self.database.generate_token()})
return '{}', 403
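# A minimal client-side sketch (assumption: the `requests` library is used by
# the caller; it is not part of this module). The server hashes the submitted
# password with SHA-512 and compares it against the stored hash, so the client
# sends the plain password and then reuses the returned token in the
# Authorization header (only the last whitespace-separated part is read):
#
#   import requests
#   r = requests.post('http://localhost:5000/login', json={'password': 'secret'})
#   token = r.json()['token']
#   requests.get('http://localhost:5000/hardware/drivers',
#                headers={'Authorization': 'Bearer {}'.format(token)})
#
# 'secret' and the host URL are placeholders.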
def set_password(self):
"""
Sets the password. Required body:
{
"password": string
}
@returns:
On success, HTTP 200 Ok and body:
{}
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
password = request.json.get('password', '')
hasher = hashlib.sha512()
hasher.update(bytes(password, 'utf-8'))
password = hasher.hexdigest()
self.database.set_setting('password', password)
return '{}'
def last_error(self):
"""
Returns the last error in the database.
@returns:
{
message: string
}
"""
if not self.verify_token():
return '{}', 403
error = self.database.get_setting('test_error')
return json.dumps({'message': error})
def hardware_drivers(self):
"""
Returns a JSON object with all the available drivers, their setups, if
any, and ports, if listable.
@returns:
{
name: string,
has_setup: boolean,
setup_arguments: [
{
name: string,
default_value: string
}
],
ports: [
{
id: number,
name: string,
analog: {
input: boolean,
output: boolean,
read_range: [number, number],
write_range: [number, number]
}
digital: {
input: boolean,
output: boolean,
pwm: boolean
}
}
]
}
"""
if not self.verify_token():
return '{}', 403
drivers = self.hardware.list_drivers()
drivers = [{
'name':
driver,
'has_setup':
self.hardware.driver_has_setup(driver),
'setup_arguments':
self.hardware.driver_setup_arguments(driver),
'ports':
self.__ports_for_driver(driver)
} for driver in drivers]
return json.dumps(drivers)
def hardware_set_configuration(self):
"""
Saves the given driver configuration. It must be a POST request with
the following body:
{
name: string,
setup_arguments: [
{
name: string,
value: string
}
],
ports: [
{
id: number,
name: string | number,
alias: string,
type: number,
defaultValue: string
}
],
configurations: [
{
port: number,
alias: string,
formula: string
}
]
}
@returns:
On success, HTTP 200 Ok and body:
{}
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
configuration = request.json
self.database.set_setting('hardware_configuration', configuration)
return '{}'
def hardware_get_configuration(self):
"""
Returns the saved driver configuration. It must be a GET request.
@returns:
On success, HTTP 200 Ok and body:
{
name: string,
setup_arguments: [
{
name: string,
value: string
}
],
ports: [
{
id: number,
name: string | number,
alias: string,
type: number,
defaultValue: string
}
],
configurations: [
{
port: number,
alias: string,
formula: string
}
]
}
or
{} if there is no configuration saved
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
config = self.database.get_setting('hardware_configuration') or {}
return json.dumps(config)
def system_response_get_tests(self):
"""
Returns the saved system response tests. It must be a GET request.
@returns:
On success, HTTP 200 Ok and body:
[{
id: number
name: string
type: string
inputs: string[]
output: string[]
points: [{
x: number
y: number
}]
fixedOutputs: [{
alias: string
value: number
}]
logRate: number
}]
or
[] if there is no configuration saved
On failure, HTTP 403 Unauthorized and body:
[]
"""
if not self.verify_token():
return '{}', 403
tests = self.database.get_setting('system_response_tests') or []
return json.dumps(tests)
def system_response_set_tests(self):
"""
Sets the saved system response tests. It must be a POST request with
the following body:
[{
id: number
name: string
type: string
inputs: string[]
output: string[]
points: [{
x: number
y: number
}]
fixedOutputs: [{
alias: string
value: number
}]
logRate: number
}]
@returns:
On success, HTTP 200 Ok and body:
{}
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
tests = request.json
self.database.set_setting('system_response_tests', tests)
return '{}'
def system_response_run(self):
"""
Runs the given test. It must be a POST request with the following body:
{
test: number
}
@returns:
On success, HTTP 200 Ok and body:
{}
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
test = request.json['test']
self.ph.send_command("hardware", "run_test", test)
return '{}'
def system_response_stop(self):
"""
Stops the running test. It must be a GET request.
@returns:
On success, HTTP 200 Ok and body:
{}
On failure, HTTP 403 Unauthorized and body:
{}
"""
if not self.verify_token():
return '{}', 403
self.database.set_setting('current_test', None)
return '{}'
def live_graph_list_tests(self):
"""
Returns a list of available graphs. It must be a GET request.
@returns:
On success, HTTP 200 Ok and body:
[
{
name: string
date: string (ISO 8601)
running: boolean
}
]
On failure, HTTP 403 Unauthorized and body:
[]
"""
if not self.verify_token():
return '{}', 403
running_test = self.database.get_setting('current_test')
tests = self.database.list_test_data()
if len(tests) == 0:
return '[]'
last_date = max([test['date'] for test in tests])
tests = [{
'name':
t['name'],
'date':
t['date'].isoformat(),
'running':
t['name'] == running_test and t['date'] == last_date
} for t in tests]
return json.dumps(tests)
def live_graph_get_test(self):
"""
Returns a graph. It must be a POST request with following body:
{
test: string
start_time: string (ISO 8601)
skip?: number
}
@returns:
On success, HTTP 200 Ok and body:
[
{
sensor: string
time: string | number
value: string | number
}
]
On failure, HTTP 403 Unauthorized and body:
[]
"""
if not self.verify_token():
return '{}', 403
test = request.json['test']
start_time = dateutil.parser.parse(request.json['start_time'])
skip = request.json.get('skip', 0)
points = self.database.get_test_data(test, start_time, skip)
return json.dumps(points)
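# Example request body (a sketch; the test name and timestamp are placeholders):
#
#   {
#       "test": "step_response",
#       "start_time": "2021-03-21T00:00:00",
#       "skip": 0
#   }
#
# `start_time` must be ISO 8601, since it is parsed with dateutil.parser.parse,
# and `skip` is optional (defaults to 0).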
def live_graph_remove_test(self):
"""
Deletes a test. It must be a POST request with following
for the get_builds method
:param value:
:type value: :class:`<[Build]> <azure.devops.v5_1.build.models.[Build]>`
:param continuation_token: The continuation token to be used to get the next page of results.
:type continuation_token: str
"""
self.value = value
self.continuation_token = continuation_token
def queue_build(self, build, project, ignore_warnings=None, check_in_ticket=None, source_build_id=None):
"""QueueBuild.
Queues a build
:param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build:
:param str project: Project ID or project name
:param bool ignore_warnings:
:param str check_in_ticket:
:param int source_build_id:
:rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if ignore_warnings is not None:
query_parameters['ignoreWarnings'] = self._serialize.query('ignore_warnings', ignore_warnings, 'bool')
if check_in_ticket is not None:
query_parameters['checkInTicket'] = self._serialize.query('check_in_ticket', check_in_ticket, 'str')
if source_build_id is not None:
query_parameters['sourceBuildId'] = self._serialize.query('source_build_id', source_build_id, 'int')
content = self._serialize.body(build, 'Build')
response = self._send(http_method='POST',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('Build', response)
def update_build(self, build, project, build_id, retry=None):
"""UpdateBuild.
Updates a build.
:param :class:`<Build> <azure.devops.v5_1.build.models.Build>` build: The build.
:param str project: Project ID or project name
:param int build_id: The ID of the build.
:param bool retry:
:rtype: :class:`<Build> <azure.devops.v5_1.build.models.Build>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if retry is not None:
query_parameters['retry'] = self._serialize.query('retry', retry, 'bool')
content = self._serialize.body(build, 'Build')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('Build', response)
def update_builds(self, builds, project):
"""UpdateBuilds.
Updates multiple builds.
:param [Build] builds: The builds to update.
:param str project: Project ID or project name
:rtype: [Build]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(builds, '[Build]')
response = self._send(http_method='PATCH',
location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
version='5.1',
route_values=route_values,
content=content)
return self._deserialize('[Build]', self._unwrap_collection(response))
def get_build_changes(self, project, build_id, continuation_token=None, top=None, include_source_change=None):
"""GetBuildChanges.
Gets the changes associated with a build
:param str project: Project ID or project name
:param int build_id:
:param str continuation_token:
:param int top: The maximum number of changes to return
:param bool include_source_change:
:rtype: :class:`<GetBuildChangesResponseValue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if build_id is not None:
route_values['buildId'] = self._serialize.url('build_id', build_id, 'int')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if include_source_change is not None:
query_parameters['includeSourceChange'] = self._serialize.query('include_source_change', include_source_change, 'bool')
response = self._send(http_method='GET',
location_id='54572c7b-bbd3-45d4-80dc-28be08941620',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
response_value = self._deserialize('[Change]', self._unwrap_collection(response))
continuation_token = self._get_continuation_token(response)
return self.GetBuildChangesResponseValue(response_value, continuation_token)
class GetBuildChangesResponseValue(object):
def __init__(self, value, continuation_token):
"""
Response for the get_build_changes method
:param value:
:type value: :class:`<[Change]> <azure.devops.v5_1.build.models.[Change]>`
:param continuation_token: The continuation token to be used to get the next page of results.
:type continuation_token: str
"""
self.value = value
self.continuation_token = continuation_token
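# A usage sketch for the continuation-token pattern (hypothetical project name
# and build id; `client` is an instance of this class):
#
#   changes = []
#   token = None
#   while True:
#       page = client.get_build_changes('MyProject', 42,
#                                       continuation_token=token, top=100)
#       changes.extend(page.value)
#       token = page.continuation_token
#       if not token:
#           break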
def get_build_controller(self, controller_id):
"""GetBuildController.
Gets a controller
:param int controller_id:
:rtype: :class:`<BuildController> <azure.devops.v5_1.build.models.BuildController>`
"""
route_values = {}
if controller_id is not None:
route_values['controllerId'] = self._serialize.url('controller_id', controller_id, 'int')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.1',
route_values=route_values)
return self._deserialize('BuildController', response)
def get_build_controllers(self, name=None):
"""GetBuildControllers.
Gets controllers, optionally filtered by name
:param str name:
:rtype: [BuildController]
"""
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
response = self._send(http_method='GET',
location_id='fcac1932-2ee1-437f-9b6f-7f696be858f6',
version='5.1',
query_parameters=query_parameters)
return self._deserialize('[BuildController]', self._unwrap_collection(response))
def create_definition(self, definition, project, definition_to_clone_id=None, definition_to_clone_revision=None):
"""CreateDefinition.
Creates a new definition.
:param :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>` definition: The definition.
:param str project: Project ID or project name
:param int definition_to_clone_id:
:param int definition_to_clone_revision:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if definition_to_clone_id is not None:
query_parameters['definitionToCloneId'] = self._serialize.query('definition_to_clone_id', definition_to_clone_id, 'int')
if definition_to_clone_revision is not None:
query_parameters['definitionToCloneRevision'] = self._serialize.query('definition_to_clone_revision', definition_to_clone_revision, 'int')
content = self._serialize.body(definition, 'BuildDefinition')
response = self._send(http_method='POST',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('BuildDefinition', response)
def delete_definition(self, project, definition_id):
"""DeleteDefinition.
Deletes a definition and all associated builds.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
self._send(http_method='DELETE',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values)
def get_definition(self, project, definition_id, revision=None, min_metrics_time=None, property_filters=None, include_latest_builds=None):
"""GetDefinition.
Gets a definition, optionally at a specific revision.
:param str project: Project ID or project name
:param int definition_id: The ID of the definition.
:param int revision: The revision number to retrieve. If this is not specified, the latest version will be returned.
:param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
:param [str] property_filters: A comma-delimited list of properties to include in the results.
:param bool include_latest_builds:
:rtype: :class:`<BuildDefinition> <azure.devops.v5_1.build.models.BuildDefinition>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if definition_id is not None:
route_values['definitionId'] = self._serialize.url('definition_id', definition_id, 'int')
query_parameters = {}
if revision is not None:
query_parameters['revision'] = self._serialize.query('revision', revision, 'int')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('BuildDefinition', response)
def get_definitions(self, project, name=None, repository_id=None, repository_type=None, query_order=None, top=None, continuation_token=None, min_metrics_time=None, definition_ids=None, path=None, built_after=None, not_built_after=None, include_all_properties=None, include_latest_builds=None, task_id_filter=None, process_type=None, yaml_filename=None):
"""GetDefinitions.
Gets a list of definitions.
:param str project: Project ID or project name
:param str name: If specified, filters to definitions whose names match this pattern.
:param str repository_id: A repository ID. If specified, filters to definitions that use this repository.
:param str repository_type: If specified, filters to definitions that have a repository of this type.
:param str query_order: Indicates the order in which definitions should be returned.
:param int top: The maximum number of definitions to return.
:param str continuation_token: A continuation token, returned by a previous call to this method, that can be used to return the next set of definitions.
:param datetime min_metrics_time: If specified, indicates the date from which metrics should be included.
:param [int] definition_ids: A comma-delimited list that specifies the IDs of definitions to retrieve.
:param str path: If specified, filters to definitions under this folder.
:param datetime built_after: If specified, filters to definitions that have builds after this date.
:param datetime not_built_after: If specified, filters to definitions that do not have builds after this date.
:param bool include_all_properties: Indicates whether the full definitions should be returned. By default, shallow representations of the definitions are returned.
:param bool include_latest_builds: Indicates whether to return the latest and latest completed builds for this definition.
:param str task_id_filter: If specified, filters to definitions that use the specified task.
:param int process_type: If specified, filters to definitions with the given process type.
:param str yaml_filename: If specified, filters to YAML definitions that match the given filename.
:rtype: :class:`<GetDefinitionsResponseValue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if repository_id is not None:
query_parameters['repositoryId'] = self._serialize.query('repository_id', repository_id, 'str')
if repository_type is not None:
query_parameters['repositoryType'] = self._serialize.query('repository_type', repository_type, 'str')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if min_metrics_time is not None:
query_parameters['minMetricsTime'] = self._serialize.query('min_metrics_time', min_metrics_time, 'iso-8601')
if definition_ids is not None:
definition_ids = ",".join(map(str, definition_ids))
query_parameters['definitionIds'] = self._serialize.query('definition_ids', definition_ids, 'str')
if path is not None:
query_parameters['path'] = self._serialize.query('path', path, 'str')
if built_after is not None:
query_parameters['builtAfter'] = self._serialize.query('built_after', built_after, 'iso-8601')
if not_built_after is not None:
query_parameters['notBuiltAfter'] = self._serialize.query('not_built_after', not_built_after, 'iso-8601')
if include_all_properties is not None:
query_parameters['includeAllProperties'] = self._serialize.query('include_all_properties', include_all_properties, 'bool')
if include_latest_builds is not None:
query_parameters['includeLatestBuilds'] = self._serialize.query('include_latest_builds', include_latest_builds, 'bool')
if task_id_filter is not None:
query_parameters['taskIdFilter'] = self._serialize.query('task_id_filter', task_id_filter, 'str')
if process_type is not None:
query_parameters['processType'] = self._serialize.query('process_type', process_type, 'int')
if yaml_filename is not None:
query_parameters['yamlFilename'] = self._serialize.query('yaml_filename', yaml_filename, 'str')
response = self._send(http_method='GET',
location_id='dbeaf647-6167-421a-bda9-c9327b25e2e6',
version='5.1',
route_values=route_values,
query_parameters=query_parameters)
response_value = self._deserialize('[BuildDefinitionReference]', self._unwrap_collection(response))
continuation_token
# Repository: arsh-khokhar/Bayesian-Nets-Fraud-Detection
"""
File name: Factor.py
Author: <NAME>, <NAME>
Date last modified: 21 March, 2021
Python Version: 3.8
This script contains the Factor class with necessary utility functions
for doing inference using variable elimination or enumeration.
"""
from enum import IntEnum
import numpy as np
from typing import List
class Sign(IntEnum):
"""
IntEnum class used to represent whether a variable in a given table row
is + or -
"""
POSITIVE = 5
NEGATIVE = -5
UNDEFINED = -10
class Factor:
"""
Representation of a factor for inference
Attributes
solution_variables Non-evidence variables that are on the solution side
given_variables Non-evidence variables that are on the given side
variables All of the variables that this factor
table 2D numpy array, representing the factor's
probability table
solution_evidence Evidence variables that are on the solution side
(only used for printing the representation)
given_evidence Evidence variables that are on the given side
(only used for printing the representation)
is_probability Indicates if the factor is a probability table as well
"""
def __init__(self, solution_variables: List[str],
given_variables: List[str], values: List[float],
solution_evidence=None, given_evidence=None, is_probability=True) -> None:
"""
Constructor of a Factor
:param solution_variables: Non-evidence variables that are on the solution side
:param given_variables: Non-evidence variables that are on the given side
:param values: probability values of the table
:param solution_evidence: Evidence variables that are on the solution side
(only used for printing the representation)
:param given_evidence: Evidence variables that are on the given side
(only used for printing the representation)
:param is_probability: Indicates if the factor is a probability table as well
"""
self.solution_variables = solution_variables
self.given_variables = given_variables
self.variables = given_variables + solution_variables
# generating the table using the variables and the values
self.table = self.generate_table(self.variables, values)
# default value must be set to None for solution_evidence
# and given_evidence so that each Factor object can create
# its own copy of them. If default value is set to empty
# list, multiple objects refer to the same empty list object
# which causes issues
if solution_evidence is None:
self.solution_evidence = []
else:
self.solution_evidence = solution_evidence
if given_evidence is None:
self.given_evidence = []
else:
self.given_evidence = given_evidence
# If the factor is also a probability table.
self.is_probability = is_probability
@staticmethod
def generate_table_skeleton(variables: List[str]) -> np.ndarray:
"""
Generate a probability table with all the possible combinations
of the input variables without any probability values
:param variables: variables to generate the skeleton table for
:return: Mutidimensional numpy array representing the factor table
with the values (i.e. last column) unassigned
"""
num_variables = len(variables)
num_rows = 2**num_variables # assuming the domain length is 2 for all vars
num_cols = num_variables + 1 # one col per var + last col for probability values
table = np.zeros([num_rows, num_cols])
table.fill(Sign.UNDEFINED) # initializing everything with undefined
for i in range(len(variables)):
# combination_len is the length of same value for generating the
# combinations with other variables. E.g., for a table with 3 variables,
# The first column will be ++++---- (combination_len is 4 for this one)
# The second column will be ++--++-- (combination_len is 2 for this one)
# The last variable's column will be +-+-+-+- (combination_len is 1 here)
combination_len = 2**(num_variables - i)//2
for j in range(0, num_rows, combination_len*2):
table[j:combination_len+j, [i]] = Sign.POSITIVE
table[combination_len+j:2*combination_len+j, [i]] = Sign.NEGATIVE
return table
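# Example of the generated pattern (a sketch): for variables ['A', 'B'] the
# skeleton has 4 rows and 3 columns; the variable columns follow the ++--/+-+-
# combination scheme described above and the value column stays UNDEFINED:
#
#   [[ 5,  5, -10],    # +a, +b
#    [ 5, -5, -10],    # +a, -b
#    [-5,  5, -10],    # -a, +b
#    [-5, -5, -10]]    # -a, -b
#
# (5 = Sign.POSITIVE, -5 = Sign.NEGATIVE, -10 = Sign.UNDEFINED)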
def generate_table(self, variables: List[str], values: List[float]) -> np.ndarray:
"""
Generate a probability table with all the possible combinations
of the input variables and the given probability values. Values
must be in correct order for this to work correctly.
:param variables: variables for the table
:param values: probability values of each row in correct order
:return: Mutidimensional numpy array representing the factor table
"""
table = self.generate_table_skeleton(variables)
table[:, -1] = values # last column is the probability values
return table
def print_representation(self) -> None:
"""
Print out the representation of a factor
ex P(+x,y|-t,a)
"""
if self.is_probability:
print('P(', end='')
else:
print('f(', end='')
Factor.print_representation_helper(self.solution_evidence, self.solution_variables)
if len(self.given_variables) > 0 or len(self.given_evidence) > 0:
print('|', end='')
Factor.print_representation_helper(self.given_evidence, self.given_variables)
print(')')
@staticmethod
def print_representation_helper(evidence_vars: List[str], variables: List[str]) -> None:
"""
Print out the variables portion of a factor representation
:param evidence_vars: Evidence vars to print
:param variables: Non-evidence vars to print
"""
for i, entry in enumerate(evidence_vars):
var, value = entry
if value == Sign.POSITIVE:
print('+{0}'.format(var.lower()), end='')
else:
print('-{0}'.format(var.lower()), end='')
if i != len(evidence_vars) - 1 or len(variables) > 0:
print(',', end='')
for i, var in enumerate(variables):
print(var, end='')
if i != len(variables) - 1:
print(',', end='')
def print_factor(self) -> None:
"""
Print the factor
"""
for row in self.table:
for i, cell in enumerate(row):
if cell == Sign.POSITIVE:
print('{:^10s}'.format("+" + self.variables[i].lower()), end='|')
elif cell == Sign.NEGATIVE:
print('{:^10s}'.format("-" + self.variables[i].lower()), end='|')
else:
cell_value = "{:^.5f}".format(cell)
print('{:^10s}'.format(cell_value), end='|\n')
print()
def remove_var(self, variable: str) -> None:
"""
Remove a variable from variables, given_variables, and
solution_variables after it's observed or summed out
:param variable: The variable to remove
"""
self.variables.remove(variable)
self.safe_remove_list(self.given_variables, variable)
self.safe_remove_list(self.solution_variables, variable)
@staticmethod
def safe_remove_list(input_list: list, value) -> None:
"""
Deletes an entry from a list without throwing the ValueError exception
in case the entry is not in the set
:param input_list: set from which the entry has to be removed
:param value: the set entry to be removed
"""
try:
input_list.remove(value)
except ValueError:
pass
def observe_var(self, variable: str, value: Sign) -> None:
"""
Restricts a variable to some value in the factor
:param variable: Variable to restrict
:param value: value for restriction
"""
if variable not in self.variables:
return
index = self.variables.index(variable)
self.table = self.table[self.table[:, index] == value]
self.table = np.delete(self.table, index, axis=1)
if variable in self.solution_variables:
self.solution_evidence.append((variable.lower(), value))
if variable in self.given_variables:
self.given_evidence.append((variable.lower(), value))
self.remove_var(variable)
def normalize(self) -> None:
"""
Normalizes the factor so that all rows add up to 1
"""
sum_of_vals = sum(self.table[:, -1])
self.table[:, -1] /= sum_of_vals
def sumout(self, variable: str) -> None:
"""
Sums out a variable in the factor
:param variable: variable to sumout
"""
if variable not in self.variables:
return
index = self.variables.index(variable) # getting the index of the variable
self.table = np.delete(self.table, index, axis=1)
self.remove_var(variable)
# new table for summed out rows
summed_out = self.generate_table_skeleton(self.variables)
summed_out[:, -1] = 0
for row1 in summed_out:
for row2 in self.table:
# if two rows are the same after removing a variable,
# their values are added
if np.array_equal(row1[:-1], row2[:-1]):
row1[-1] = row1[-1] + row2[-1]
self.table = summed_out
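# Worked example (a sketch): for a factor over ['A', 'B'] with values
# [0.3, 0.2, 0.4, 0.1] (ordered +a+b, +a-b, -a+b, -a-b), summing out 'B'
# leaves a factor over ['A'] with +a = 0.3 + 0.2 = 0.5 and
# -a = 0.4 + 0.1 = 0.5, because rows that agree on 'A' are merged and
# their probabilities added.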
@staticmethod
def multiply(factor1, factor2):
"""
Multiply two factors together and return a new product factor
:param factor1: The first factor to multiply
:param factor2: The second factor to multiply
:return: A new factor that's the product of factor1 and factor2
"""
# Ensure factor1 is the factor whose solution variables appear among
# factor2's given variables; the rest of the function assumes this
# ordering, so swap the two factors if it does not already hold.
temp = factor2
factor2 = factor1
factor1 = temp
# list(dict.fromkeys( ... )) is used to remove duplicates while maintaining order
new_solution_vars = list(dict.fromkeys(factor1.solution_variables
+ factor2.solution_variables))
new_given_vars = list(dict.fromkeys(factor1.given_variables
+ [item for item in factor2.given_variables if item not in new_solution_vars]))
# generate the probabilities for our new factor
# (no sorting is required since the variable ordering consistent between
# the two original factors)
new_prob_list = []
for row1 in factor1.table:
for row2 in factor2.table:
if Factor.is_valid_row_multiply(factor1.variables,
factor2.variables,
row1, row2):
new_prob_list.append(row1[-1] * row2[-1])
return Factor(new_solution_vars, new_given_vars, new_prob_list,
list(set(factor1.solution_evidence + factor2.solution_evidence)),
list(set(factor1.given_evidence + factor2.given_evidence)),
False)
@staticmethod
def is_valid_row_multiply(variables1: list, variables2: list, row1: list, row2: list) -> bool:
"""
Two rows are valid if there are no contradictions between them
Ex if two rows have +x and -x respectively then they're not valid
:param variables1: Variables of factor 1
:param variables2: Variables of factor 2
:param row1: Row from factor 1
:param row2: Row from factor 2
:return: True if two rows are valid
#!/usr/bin/env python
"""
================================================
ABElectronics Expander Pi
Requires smbus2 or python smbus to be installed
================================================
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
try:
from smbus2 import SMBus
except ImportError:
try:
from smbus import SMBus
except ImportError:
raise ImportError("python-smbus or smbus2 not found")
try:
import spidev
except ImportError:
raise ImportError(
"spidev not found.")
import re
import platform
import datetime
"""
Private Classes
"""
class _ABEHelpers:
"""
Local Functions used across all Expander Pi classes
"""
@staticmethod
def updatebyte(byte, bit, value):
"""
internal method for setting the value of a single bit within a byte
"""
if value == 0:
return byte & ~(1 << bit)
elif value == 1:
return byte | (1 << bit)
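# Worked examples:
#   updatebyte(0b0000, 2, 1) -> 0b0100 (sets bit 2)
#   updatebyte(0b1111, 1, 0) -> 0b1101 (clears bit 1)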
@staticmethod
def get_smbus():
"""
internal method for getting an instance of the i2c bus
"""
i2c__bus = 1
# detect the device that is being used
device = platform.uname()[1]
if device == "orangepione": # running on orange pi one
i2c__bus = 0
elif device == "orangepiplus": # running on orange pi plus
i2c__bus = 0
elif device == "orangepipcplus": # running on orange pi pc plus
i2c__bus = 0
elif device == "linaro-alip": # running on Asus Tinker Board
i2c__bus = 1
elif device == "raspberrypi": # running on raspberry pi
# detect i2C port number and assign to i2c__bus
for line in open('/proc/cpuinfo').readlines():
model = re.match('(.*?)\\s*:\\s*(.*)', line)
if model:
(name, value) = (model.group(1), model.group(2))
if name == "Revision":
if value[-4:] in ('0002', '0003'):
i2c__bus = 0
else:
i2c__bus = 1
break
try:
return SMBus(i2c__bus)
except IOError:
raise IOError('Could not open the i2c bus')
"""
Public Classes
"""
class ADC:
"""
Based on the Microchip MCP3208
"""
# variables
__adcrefvoltage = 4.096 # reference voltage for the ADC chip.
__spiADC = None
def __init__(self):
# Define SPI bus and init
self.__spiADC = spidev.SpiDev()
self.__spiADC.open(0, 0)
self.__spiADC.max_speed_hz = (1900000)
# public methods
def read_adc_voltage(self, channel, mode):
"""
Read the voltage from the selected channel on the ADC
Channel = 1 to 8
Mode = 0 or 1 - 0 = single ended, 1 = differential
"""
if (mode < 0) or (mode > 1):
raise ValueError('read_adc_voltage: mode out of range')
if (channel > 4) and (mode == 1):
raise ValueError('read_adc_voltage: channel out of range')
if (channel > 8) or (channel < 1):
raise ValueError('read_adc_voltage: channel out of range')
raw = self.read_adc_raw(channel, mode)
voltage = (self.__adcrefvoltage / 4096) * raw
return voltage
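# Worked example: with the default 4.096 V reference, each of the 4096 ADC
# counts is worth 1 mV, so a raw reading of 2048 converts to
# (4.096 / 4096) * 2048 = 2.048 V.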
def read_adc_raw(self, channel, mode):
"""
Read the raw value from the selected channel on the ADC
Channel = 1 to 8
Mode = 0 or 1 - 0 = single ended, 1 = differential
"""
if (mode < 0) or (mode > 1):
raise ValueError('read_adc_raw: mode out of range')
if (channel > 4) and (mode == 1):
raise ValueError('read_adc_raw: channel out of range when mode = 1')
if (channel > 8) or (channel < 1):
raise ValueError('read_adc_raw: channel out of range')
channel = channel - 1
if mode == 0:
raw = self.__spiADC.xfer2(
[6 + (channel >> 2), (channel & 3) << 6, 0])
if mode == 1:
raw = self.__spiADC.xfer2(
[4 + (channel >> 2), (channel & 3) << 6, 0])
ret = ((raw[1] & 0x0F) << 8) + (raw[2])
return ret
def set_adc_refvoltage(self, voltage):
"""
set the reference voltage for the analogue to digital converter.
By default the ADC uses an onboard 4.096V voltage reference. If you
choose to use an external voltage reference you will need to
use this method to set the ADC reference voltage to match the
supplied reference voltage.
The reference voltage must be less than or equal to the voltage on
the Raspberry Pi 5V rail.
"""
if (voltage >= 0.0) and (voltage <= 5.5):
self.__adcrefvoltage = voltage
else:
raise ValueError('set_adc_refvoltage: reference voltage out of range')
return
class DAC:
"""
Based on the Microchip MCP4822
Define SPI bus and init
"""
__spiDAC = None
dactx = [0, 0]
# Max DAC output voltage. Depends on gain factor
# The following table is in the form <gain factor>:<max voltage>
__dacMaxOutput__ = {
1: 2.048, # This is Vref
2: 4.096 # This is double Vref
}
maxdacvoltage = 2.048
# public methods
def __init__(self, gainFactor=1):
"""Class Constructor
gainFactor -- Set the DAC's gain factor. The value should
be 1 or 2. Gain factor is used to determine output voltage
from the formula: Vout = G * Vref * D/4096
Where G is gain factor, Vref (for this chip) is 2.048 and
D is the 12-bit digital value
"""
# Define SPI bus and init
self.__spiDAC = spidev.SpiDev()
self.__spiDAC.open(0, 1)
self.__spiDAC.max_speed_hz = (20000000)
if (gainFactor != 1) and (gainFactor != 2):
raise ValueError('DAC __init__: Invalid gain factor. Must be 1 or 2')
else:
self.gain = gainFactor
self.maxdacvoltage = self.__dacMaxOutput__[self.gain]
def set_dac_voltage(self, channel, voltage):
"""
set the voltage for the selected channel on the DAC
voltage can be between 0 and 2.047 volts when gain is set to 1\
or 4.096 when gain is set to 2
"""
if (channel > 2) or (channel < 1):
raise ValueError('set_dac_voltage: DAC channel needs to be 1 or 2')
if (voltage >= 0.0) and (voltage < self.maxdacvoltage):
rawval = (voltage / 2.048) * 4096 / self.gain
self.set_dac_raw(channel, int(rawval))
else:
raise ValueError('set_dac_voltage: voltage out of range')
return
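# Worked example: with gain factor 1, requesting 1.024 V gives
# rawval = (1.024 / 2.048) * 4096 / 1 = 2048, and the output follows
# Vout = G * Vref * D / 4096 = 1 * 2.048 * 2048 / 4096 = 1.024 V.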
def set_dac_raw(self, channel, value):
"""
Set the raw value from the selected channel on the DAC
Channel = 1 or 2
Value between 0 and 4095
"""
if (channel > 2) or (channel < 1):
raise ValueError('set_dac_raw: DAC channel needs to be 1 or 2')
if (value < 0) or (value > 4095):
raise ValueError('set_dac_raw: value out of range')
self.dactx[1] = (value & 0xff)
if self.gain == 1:
self.dactx[0] = (((value >> 8) & 0xff) | (channel - 1) << 7 |
1 << 5 | 1 << 4)
else:
self.dactx[0] = (((value >> 8) & 0xff) | (channel - 1) << 7 |
1 << 4)
# Write to device
self.__spiDAC.xfer2(self.dactx)
return
class IO:
"""
The MCP23017 chip is split into two 8-bit ports. port 0 controls pins
1 to 8 while port 1 controls pins 9 to 16.
When writing to or reading from a port the least significant bit
represents the lowest numbered pin on the selected port.
#
"""
# Define registers values from datasheet
IODIRA = 0x00 # IO direction A - 1= input 0 = output
IODIRB = 0x01 # IO direction B - 1= input 0 = output
# Input polarity A - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLA = 0x02
# Input polarity B - If a bit is set, the corresponding GPIO register bit
# will reflect the inverted value on the pin.
IPOLB = 0x03
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port A.
GPINTENA = 0x04
# The GPINTEN register controls the interrupt-onchange feature for each
# pin on port B.
GPINTENB = 0x05
# Default value for port A - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALA = 0x06
# Default value for port B - These bits set the compare value for pins
# configured for interrupt-on-change. If the associated pin level is the
# opposite from the register bit, an interrupt occurs.
DEFVALB = 0x07
# Interrupt control register for port A. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONA = 0x08
# Interrupt control register for port B. If 1 interrupt is fired when the
# pin matches the default value, if 0 the interrupt is fired on state
# change
INTCONB = 0x09
IOCON = 0x0A # see datasheet for configuration register
GPPUA
"""Provides a class for constructing simulations based on Firedrake.
Simulations proceed forward in time by solving
a sequence of Initial Boundary Values Problems (IBVP's).
Using the Firedrake framework,
the PDE's are discretized in space with Finite Elements (FE).
The symbolic capabilities of Firedrake are used to
automatically implement backward difference formula (BDF) time
discretizations and to automatically linearize nonlinear problems
with Newton's method.
Nonlinear and linear solvers are provided by PETSc
and are accessed via the Firedrake interface.
This module imports `firedrake` as `fe` and its documentation writes
`fe` instead of `firedrake`.
"""
import typing
import pathlib
import ufl
import firedrake as fe
import sapphire.time_discretization
import sapphire.output
class Simulation:
"""A PDE-based simulation using the Firedrake framework.
The PDE's are discretized in space using finite elements
and in time using backward difference formulas.
Implementing a simulation requires at least instantiating this
class and calling the instance's `run` method.
"""
def __init__(self,
solution: fe.Function,
time: float = 0.,
time_stencil_size: int = 2,
timestep_size: float = 1.,
quadrature_degree: int = None,
solver_parameters: dict = {
"snes_type": "newtonls",
"snes_monitor": None,
"ksp_type": "preonly",
"pc_type": "lu",
"mat_type": "aij",
"pc_factor_mat_solver_type": "mumps"},
output_directory_path: str = "output/",
fieldnames: typing.Iterable[str] = None):
"""
Instantiating this class requires enough information to fully
specify the FE spatial discretization and weak form residual.
boundary conditions, and initial values. All of these required
arguments are Firedrake objects used according to Firedrake
conventions.
Backward Difference Formula time discretizations are
automatically implemented. To use a different time
discretization, inherit this class and redefine
`time_discrete_terms`.
Args:
solution: Solution for a single time step.
As a `fe.Function`, this also defines the
mesh, element, and solution function space.
time: The initial time.
time_stencil_size: The number of solutions at
discrete times used for approximating time derivatives.
This also determines the number of stored solutions.
Must be greater than zero.
Defaults to 2. Set to 1 for steady state problems.
Increase for higher-order time accuracy.
timestep_size: The size of discrete time steps.
Defaults to 1.
Higher order time discretizations are assumed to use
a constant time step size.
quadrature_degree: The quadrature degree used for
numerical integration.
Defaults to `None`, in which case Firedrake will
automatically choose a suitable quadrature degree.
solver_parameters: The solver parameters dictionary
which Firedrake uses to configure PETSc.
output_directory_path: String that will be converted
to a Path where output files will be written.
Defaults to "output/".
fieldnames: A list of names for the components of `solution`.
Defaults to `None`.
These names can be used when indexing solutions that are split
either by `firedrake.split` or `firedrake.Function.split`.
If not `None`, then the `dict` `self.solution_fields` will be created.
The `dict` will have two items for each field,
containing the results of either splitting method.
The results of `firedrake.split` will be suffixed with "_ufl".
"""
assert(time_stencil_size > 0)
self.fieldcount = len(solution.split())
if fieldnames is None:
fieldnames = ["w_{}" for i in range(self.fieldcount)]
assert(len(fieldnames) == self.fieldcount)
self.fieldnames = fieldnames
self.solution = solution
self.time = fe.Constant(time)
self.solution_space = self.solution.function_space()
self.mesh = self.solution_space.mesh()
self.unit_vectors = unit_vectors(self.mesh)
self.element = self.solution_space.ufl_element()
self.timestep_size = fe.Constant(timestep_size)
self.quadrature_degree = quadrature_degree
self.dx = fe.dx(degree = self.quadrature_degree)
self.solver_parameters = solver_parameters
initial_values = self.initial_values()
if initial_values is not None:
self.solution = self.solution.assign(initial_values)
# States for time dependent simulation and checkpointing
self.solutions = [self.solution,]
self.times = [self.time,]
self.state = {
"solution": self.solution,
"time": self.time,
"index": 0}
self.states = [self.state,]
for i in range(1, time_stencil_size):
self.solutions.append(fe.Function(self.solution))
self.times.append(fe.Constant(self.time - i*timestep_size))
self.states.append({
"solution": self.solutions[i],
"time": self.times[i],
"index": -i})
# Continuation helpers
self.backup_solution = fe.Function(self.solution)
# Mixed solution indexing helpers
self.solution_fields = {}
self.solution_subfunctions = {}
self.test_functions = {}
self.time_discrete_terms = {}
self.solution_subspaces = {}
for name, field, field_pp, testfun, timeterm in zip(
fieldnames,
fe.split(self.solution),
self.solution.split(),
fe.TestFunctions(self.solution_space),
time_discrete_terms(
solutions = self.solutions,
timestep_size = self.timestep_size)):
self.solution_fields[name] = field
self.solution_subfunctions[name] = field_pp
self.test_functions[name] = testfun
self.time_discrete_terms[name] = timeterm
self.solution_subspaces[name] = self.solution_space.sub(
fieldnames.index(name))
# Output controls
self.output_directory_path = pathlib.Path(output_directory_path)
self.output_directory_path.mkdir(parents = True, exist_ok = True)
self.vtk_solution_file = None
self.plotvars = None
self.snes_iteration_count = 0
def run(self,
endtime: float,
write_checkpoints: bool = True,
write_vtk_solutions: bool = False,
write_plots: bool = False,
write_initial_outputs: bool = True,
endtime_tolerance: float = 1.e-8,
solve: typing.Callable = None) \
-> (typing.List[fe.Function], float):
"""Run simulation forward in time.
Args:
endtime (float): Run until reaching this time.
write_checkpoints (bool): Write checkpoints if True.
write_vtk_solutions (bool): Write solutions to VTK if True.
write_plots (bool): Write plots if True.
Writing the plots to disk can in some cases dominate
the processing time. Additionally, much more data
is generated, requiring more disk storage space.
write_initial_outputs (bool): Write for initial values
before solving the first time step. Default to True.
You may want to set this to False if, for example, you
are calling `run` repeatedly with later endtimes.
In such a case, the initial values are the same as
the previously computed solution, and so they should
not be written again.
endtime_tolerance (float): Allows endtime to be only
approximately reached. This is larger than a
typical floating point comparison tolerance
because errors accumulate between timesteps.
solve (callable): This is called to solve each time step.
By default, this will be set to `self.solve`.
"""
if write_initial_outputs:
self.write_outputs(
headers = True,
checkpoint = write_checkpoints,
vtk = write_vtk_solutions,
plots = write_plots)
if solve is None:
solve = self.solve
while self.time.__float__() < (endtime - endtime_tolerance):
self.states = self.push_back_states()
self.time = self.time.assign(self.time + self.timestep_size)
self.state["index"] += 1
self.solution = solve()
print("Solved at time t = {}".format(self.time.__float__()))
self.write_outputs(
headers = False,
checkpoint = write_checkpoints,
vtk = write_vtk_solutions,
plots = write_plots)
return self.states
def solve(self) -> fe.Function:
"""Set up the problem and solver, and solve.
This is a JIT (just in time), ensuring that the problem and
solver setup are up-to-date before calling the solver.
All compiled objects are cached, so the JIT problem and solver
setup does not have any significant performance overhead.
"""
problem = fe.NonlinearVariationalProblem(
F = self.weak_form_residual(),
u = self.solution,
bcs = self.dirichlet_boundary_conditions(),
J = fe.derivative(self.weak_form_residual(), self.solution))
solver = fe.NonlinearVariationalSolver(
problem = problem,
nullspace = self.nullspace(),
solver_parameters = self.solver_parameters)
solver.solve()
self.snes_iteration_count += solver.snes.getIterationNumber()
return self.solution
def weak_form_residual(self):
raise("This method must be redefined by the derived class.")
def initial_values(self):
return None
def dirichlet_boundary_conditions(self):
return None
def nullspace(self):
return None
def push_back_states(self) -> typing.List[typing.Dict]:
"""Push back states, including solutions, times, and indices.
Sufficient solutions are stored for the time discretization.
Advancing the simulation forward in time requires re-indexing
the solutions and times.
"""
for i in range(len(self.states[1:])):
rightstate = self.states[-1 - i]
leftstate = self.states[-2 - i]
rightstate["index"] = leftstate["index"]
for key in "solution", "time":
# Set values of `fe.Function` and `fe.Constant`
# with their `assign` methods.
rightstate[key] = rightstate[key].assign(leftstate[key])
return self.states
def postprocess(self) -> 'Simulation':
""" This is called by `write_outputs` before writing.
Redefine this to add post-processing.
"""
return self
def kwargs_for_writeplots(self) -> dict:
"""Return kwargs needed for `sappphire.outupt.writeplots`.
By default, no plots are made.
This must be redefined to return a dict
if `run` is called with `write_plots = True`.
"""
return None
def write_checkpoint(self):
sapphire.output.write_checkpoint(
states=self.states,
dirpath=self.output_directory_path,
filename="checkpoints")
def write_outputs(self,
headers: bool,
checkpoint: bool = True,
vtk: bool = False,
plots: bool = False):
"""Write all outputs.
This creates or appends the CSV report,
writes the latest solution, and plots (in 1D/2D case).
Redefine this to control outputs.
Args:
| |
filter_obj
def apply_options_obj(options, obj, dest):
"""Updates an object with options
Parameters
----------
options : dict
* dict containing options definition
obj : :class:`taniumpy.object_types.base.BaseType`
* TaniumPy object to apply `options` to
dest : list of str
* list of valid destinations (i.e. `filter` or `group`)
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* TaniumPy object updated with attributes from `options`
"""
# if no user supplied options, return the filter object unchanged
if not options:
return obj
for k, v in options.items():
for om in pytan.constants.OPTION_MAPS:
if om['destination'] != dest:
continue
om_attrs = list(om.get('attrs', {}).keys())
om_attr = om.get('attr', '')
if om_attr:
om_attrs.append(om_attr)
if k.lower() not in om_attrs:
continue
dbg = "option {!r} value {!r} mapped to: {!r}".format
manuallog.debug(dbg(k, v, om))
valid_values = om.get('valid_values', [])
valid_type = om.get('valid_type', str)
if valid_values:
valid_values = eval(valid_values)
valid_values_str = " -- valid values: "
valid_values_str += ', '.join(valid_values)
else:
valid_values = []
valid_values_str = ""
if len(str(v)) == 0:
err = (
"Option {!r} requires a {} value{}"
).format
raise pytan.exceptions.DefinitionParserError(err(k, valid_type, valid_values_str))
if valid_type == int:
try:
v = int(v)
except:
err = (
"Option {!r} value {!r} is not an integer"
).format
raise pytan.exceptions.DefinitionParserError(err(k, v))
if valid_type == str:
if not is_str(v):
err = (
"Option {!r} value {!r} is not a string"
).format
raise pytan.exceptions.DefinitionParserError(err(k, v))
value_match = None
if valid_values:
for x in valid_values:
if v.lower() == x.lower():
value_match = x
break
if value_match is None:
err = (
"Option {!r} value {!r} does not match one of {}"
).format
raise pytan.exceptions.DefinitionParserError(err(k, v, valid_values))
else:
v = value_match
# update obj with k = v
setattr(obj, k, v)
break
dbg = "Options {!r} updated to: {}".format
manuallog.debug(dbg(options, str(obj)))
return obj
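# Example (a sketch; 'ignore_case_flag' stands in for whatever option names
# pytan.constants.OPTION_MAPS actually defines, which is not shown here):
#
#   filter_obj = apply_options_obj({'ignore_case_flag': 1}, filter_obj, 'filter')
#
# Each recognized option is validated against its map entry and then written
# onto the object with setattr().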
def chk_def_key(def_dict, key, keytypes, keysubtypes=None, req=False):
"""Checks that def_dict has key
Parameters
----------
def_dict : dict
* Definition dictionary
key : str
* key to check for in def_dict
keytypes : list of str
* list of str of valid types for key
keysubtypes : list of str
* if key is a dict or list, validate that all values of dict or list are in keysubtypes
req : bool
* False: key does not have to be in def_dict
* True: key must be in def_dict, throw :exc:`pytan.exceptions.DefinitionParserError` if not
"""
if key not in def_dict:
if req:
err = "Definition {} missing 'filter' key!".format
raise pytan.exceptions.DefinitionParserError(err(def_dict))
return
val = def_dict.get(key)
if type(val) not in keytypes:
err = (
"'{}' key in definition dictionary must be a {}, you supplied "
"a {}!"
).format
raise pytan.exceptions.DefinitionParserError(err(key, keytypes, type(val)))
if not keysubtypes or not val:
return
if is_dict(val):
subtypes = [type(x) for x in list(val.values())]
else:
subtypes = [type(x) for x in val]
if not all([x in keysubtypes for x in subtypes]):
err = (
"'{}' key in definition dictionary must be a {} of {}s, "
"you supplied {}!"
).format
raise pytan.exceptions.DefinitionParserError(err(key, keytypes, keysubtypes, subtypes))
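# Example (sketch, hypothetical values): requiring that a definition dict carries
# a 'filter' key holding a dict before it is parsed any further.
#
#   chk_def_key(def_dict=d, key='filter', keytypes=[dict], req=True)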
def empty_obj(taniumpy_object):
"""Validate that a given TaniumPy object is not empty
Parameters
----------
taniumpy_object : :class:`taniumpy.object_types.base.BaseType`
* object to check if empty
Returns
-------
bool
* True if `taniumpy_object` is considered empty, False otherwise
"""
v = [getattr(taniumpy_object, '_list_properties', {}), is_str(taniumpy_object)]
if any(v) and not taniumpy_object:
return True
else:
return False
def get_q_obj_map(qtype):
"""Gets an object map for `qtype`
Parameters
----------
qtype : str
* question type to get object map from in :data:`pytan.constants.Q_OBJ_MAP`
Returns
-------
obj_map : dict
* matching object map for `qtype` from :data:`pytan.constants.Q_OBJ_MAP`
"""
try:
obj_map = pytan.constants.Q_OBJ_MAP[qtype.lower()]
except KeyError:
err = "{} not a valid question type, must be one of {!r}".format
raise pytan.exceptions.HandlerError(err(qtype, list(pytan.constants.Q_OBJ_MAP.keys())))
return obj_map
def get_obj_map(objtype):
"""Gets an object map for `objtype`
Parameters
----------
objtype : str
* object type to get object map from in :data:`pytan.constants.GET_OBJ_MAP`
Returns
-------
obj_map : dict
* matching object map for `objtype` from :data:`pytan.constants.GET_OBJ_MAP`
"""
try:
obj_map = pytan.constants.GET_OBJ_MAP[objtype.lower()]
except KeyError:
err = "{} not a valid object to get, must be one of {!r}".format
raise pytan.exceptions.HandlerError(err(objtype, list(pytan.constants.GET_OBJ_MAP.keys())))
return obj_map
def get_taniumpy_obj(obj_map):
"""Gets a taniumpy object from `obj_map`
Parameters
----------
obj_map : str
* str of taniumpy object to fetch
Returns
-------
obj : :class:`taniumpy.object_types.base.BaseType`
* matching taniumpy object for `obj_map`
"""
try:
obj = getattr(taniumpy, obj_map)
except Exception as e:
err = "Could not find taniumpy object {}: {}".format
raise pytan.exceptions.HandlerError(err(obj_map, e))
return obj
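# Example (sketch): the two lookups above are typically chained -- resolve the
# object map for a user-facing type, then fetch the matching taniumpy class.
# The 'saved_question' type and the 'all' map key are assumptions used only for
# illustration.
#
#   obj_map = get_obj_map('saved_question')
#   obj_class = get_taniumpy_obj(obj_map['all'])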
def check_dictkey(d, key, valid_types, valid_list_types):
"""Yet another method to check a dictionary for a key
Parameters
----------
d : dict
* dictionary to check for key
key : str
* key to check for in d
valid_types : list of str
* list of str of valid types for key
valid_list_types : list of str
* if key is a list, validate that all values of list are in valid_list_types
"""
if key in d:
k_val = d[key]
k_type = type(k_val)
if k_type not in valid_types:
err = "{!r} must be one of {}, you supplied {}!".format
raise pytan.exceptions.HandlerError(err(key, valid_types, k_type))
if is_list(k_val) and valid_list_types:
valid_list_types = [eval(x) for x in valid_list_types]
list_types = [type(x) for x in k_val]
list_types_match = [x in valid_list_types for x in list_types]
if not all(list_types_match):
err = "{!r} must be a list of {}, you supplied {}!".format
raise pytan.exceptions.HandlerError(err(key, valid_list_types, list_types))
def func_timing(f):
"""Decorator to add timing information around a function """
def wrap(*args, **kwargs):
time1 = datetime.datetime.utcnow()
ret = f(*args, **kwargs)
time2 = datetime.datetime.utcnow()
elapsed = time2 - time1
m = '{}() TIMING start: {}, end: {}, elapsed: {}'.format
timinglog.debug(m(f.__name__, time1, time2, elapsed))
return ret
return wrap
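# Example (sketch): func_timing is used as a plain decorator; the decorated
# function below is hypothetical.
#
#   @func_timing
#   def fetch_results(handler, **kwargs):
#       return handler.get_result_data(**kwargs)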
def eval_timing(c):
"""Yet another method to time things -- c will be evaluated and timing information will be printed out
"""
    t_start = datetime.datetime.now()
    r = eval(c)
    t_end = datetime.datetime.now()
t_elapsed = t_end - t_start
m = "Timing info for {} -- START: {}, END: {}, ELAPSED: {}, RESPONSE LEN: {}".format
mylog.warn(m(c, t_start, t_end, t_elapsed, len(r)))
return (c, r, t_start, t_end, t_elapsed)
def xml_pretty(x, pretty=True, indent=' ', **kwargs):
"""Uses :mod:`xmltodict` to pretty print an XML str `x`
Parameters
----------
x : str
* XML string to pretty print
Returns
-------
str :
* The pretty printed string of `x`
"""
x_parsed = xmltodict.parse(x)
x_unparsed = xmltodict.unparse(x_parsed, pretty=pretty, indent=indent)
return x_unparsed
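# Example (sketch): pretty printing a small XML fragment; the exact output
# formatting is whatever xmltodict.unparse produces for the given indent.
#
#   print(xml_pretty('<a><b>1</b><b>2</b></a>'))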
def log_session_communication(h):
"""Uses :func:`xml_pretty` to pretty print the last request and response bodies from the
session object in h to the logging system
Parameters
----------
h : Handler object
* Handler object with session object containing last request and response body
"""
response_obj = h.session.LAST_REQUESTS_RESPONSE
request_body = response_obj.request.body
response_body = response_obj.text
try:
req = xml_pretty(request_body)
except Exception as e:
req = "Failed to prettify xml: {}, raw xml:\n{}".format(e, request_body)
prettylog.debug("Last HTTP request:\n{}".format(req))
try:
resp = xml_pretty(response_body)
except Exception as e:
resp = "Failed to prettify xml: {}, raw xml:\n{}".format(e, response_body)
prettylog.debug("Last HTTP response:\n{}".format(xml_pretty(resp)))
def xml_pretty_resultxml(x):
"""Uses :mod:`xmltodict` to pretty print an the ResultXML element in XML str `x`
Parameters
----------
x : str
* XML string to pretty print
Returns
-------
str :
* The pretty printed string of ResultXML in `x`
"""
x_parsed = xmltodict.parse(x)
x_find = x_parsed["soap:Envelope"]["soap:Body"]["t:return"]["ResultXML"]
x_unparsed = xml_pretty(x_find)
return x_unparsed
def xml_pretty_resultobj(x):
"""Uses :mod:`xmltodict` to pretty print an the result-object element in XML str `x`
Parameters
----------
x : str
* XML string to pretty print
Returns
-------
str :
* The pretty printed string of result-object in `x`
"""
x_parsed = xmltodict.parse(x)
x_find = x_parsed["soap:Envelope"]["soap:Body"]["t:return"]
x_find = x_parsed["result-object"]
x_unparsed = xmltodict.unparse(x_find, pretty=True, indent=' ')
return x_unparsed
def get_dict_list_len(d, keys=[], negate=False):
"""Gets the sum of each list in dict `d`
Parameters
----------
d : dict of str : list
        * dict to get sums of
keys : list of str
* list of keys to get sums of, if empty gets a sum of all keys
negate : bool
* only used if keys supplied
* False : get the sums of `d` that do match keys
* True : get the sums of `d` that do not match keys
Returns
-------
list_len : int
s.define(monom + t*t*t)
sage: t.define(monom + s*s)
sage: [s.coefficient(i) for i in range(9)]
[0, 1, 0, 1, 3, 3, 7, 30, 63]
sage: [t.coefficient(i) for i in range(9)]
[0, 1, 1, 0, 2, 6, 7, 20, 75]
Test Recursive 3
::
sage: s = L()
sage: s._name = 's'
sage: s.define(one+monom*s*s*s)
sage: [s.coefficient(i) for i in range(10)]
[1, 1, 3, 12, 55, 273, 1428, 7752, 43263, 246675]
"""
self._copy(x)
x._reference = self
def coefficient(self, n):
"""
        Return the coefficient of x^n in self.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: f = L(ZZ)
sage: [f.coefficient(i) for i in range(5)]
[0, 1, -1, 2, -2]
"""
# The following line must not be written n < self.get_aorder()
        # because comparison of Integer and the infinity order is not implemented.
if self.get_aorder() > n:
return self.parent()._zero_base_ring
assert self.is_initialized
return self._stream[n]
def get_aorder(self):
"""
Return the approximate order of self.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: a = L.gen()
sage: a.get_aorder()
1
"""
self.refine_aorder()
return self.aorder
def get_order(self):
"""
Return the order of self.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: a = L.gen()
sage: a.get_order()
1
"""
self.refine_aorder()
return self.order
def get_stream(self):
"""
Return self's underlying Stream object.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: a = L.gen()
sage: s = a.get_stream()
sage: [s[i] for i in range(5)]
[0, 1, 0, 0, 0]
"""
self.refine_aorder()
return self._stream
def _approximate_order(self, compute_coefficients, new_order, *series):
if self.is_initialized:
return
ochanged = self.aorder_changed
ao = new_order(*[s.aorder for s in series])
ao = inf if ao == unk else ao
tchanged = self.set_approximate_order(ao)
if len(series) == 0:
must_initialize_coefficient_stream = True
tchanged = ochanged = False
elif len(series) == 1 or len(series) == 2:
must_initialize_coefficient_stream = ( self.aorder == unk or self.is_initialized is False)
else:
raise ValueError
if ochanged or tchanged:
for s in series:
s.compute_aorder()
ao = new_order(*[s.aorder for s in series])
tchanged = self.set_approximate_order(ao)
if must_initialize_coefficient_stream:
self.initialize_coefficient_stream(compute_coefficients)
if hasattr(self, '_reference') and self._reference is not None:
self._reference._copy(self)
def _new(self, compute_coefficients, order_op, *series, **kwds):
parent = kwds['parent'] if 'parent' in kwds else self.parent()
new_fps = self.__class__(parent, stream=None, order=unk, aorder=self.aorder,
aorder_changed=True, is_initialized=False)
new_fps.compute_aorder = lambda: new_fps._approximate_order(compute_coefficients, order_op, *series)
return new_fps
def _add_(self, y):
"""
EXAMPLES: Test Plus 1
::
sage: from sage.combinat.species.series import *
sage: from sage.combinat.species.stream import Stream
sage: L = LazyPowerSeriesRing(QQ)
sage: gs0 = L([0])
sage: gs1 = L([1])
sage: sum1 = gs0 + gs1
sage: sum2 = gs1 + gs1
sage: sum3 = gs1 + gs0
sage: [gs0.coefficient(i) for i in range(11)]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
sage: [gs1.coefficient(i) for i in range(11)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
sage: [sum1.coefficient(i) for i in range(11)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
sage: [sum2.coefficient(i) for i in range(11)]
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
sage: [sum3.coefficient(i) for i in range(11)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
Test Plus 2
::
sage: gs1 = L([1,2,4,8,0])
sage: gs2 = L([-1, 0,-1,-9,22,0])
sage: sum = gs1 + gs2
sage: sum2 = gs2 + gs1
sage: [ sum.coefficient(i) for i in range(5) ]
[0, 2, 3, -1, 22]
sage: [ sum.coefficient(i) for i in range(5, 11) ]
[0, 0, 0, 0, 0, 0]
sage: [ sum2.coefficient(i) for i in range(5) ]
[0, 2, 3, -1, 22]
sage: [ sum2.coefficient(i) for i in range(5, 11) ]
[0, 0, 0, 0, 0, 0]
"""
return self._new(partial(self._plus_gen, y), min, self, y)
add = _add_
def _plus_gen(self, y, ao):
"""
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: gs1 = L([1])
sage: g = gs1._plus_gen(gs1, 0)
sage: [next(g) for i in range(5)]
[2, 2, 2, 2, 2]
::
sage: g = gs1._plus_gen(gs1, 2)
sage: [next(g) for i in range(5)]
[0, 0, 2, 2, 2]
"""
base_ring = self.parent().base_ring()
zero = base_ring(0)
for n in range(ao):
yield zero
n = ao
while True:
yield self._stream[n] + y._stream[n]
n += 1
def _mul_(self, y):
"""
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: gs0 = L(0)
sage: gs1 = L([1])
::
sage: prod0 = gs0 * gs1
sage: [prod0.coefficient(i) for i in range(11)]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
::
sage: prod1 = gs1 * gs0
sage: [prod1.coefficient(i) for i in range(11)]
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
::
sage: prod2 = gs1 * gs1
sage: [prod2.coefficient(i) for i in range(11)]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
::
sage: gs1 = L([1,2,4,8,0])
sage: gs2 = L([-1, 0,-1,-9,22,0])
::
sage: prod1 = gs1 * gs2
sage: [prod1.coefficient(i) for i in range(11)]
[-1, -2, -5, -19, 0, 0, 16, 176, 0, 0, 0]
::
sage: prod2 = gs2 * gs1
sage: [prod2.coefficient(i) for i in range(11)]
[-1, -2, -5, -19, 0, 0, 16, 176, 0, 0, 0]
"""
return self._new(partial(self._times_gen, y), lambda a,b:a+b, self, y)
times = _mul_
def _times_gen(self, y, ao):
r"""
Return an iterator for the coefficients of self \* y.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: f = L([1,1,0])
sage: g = f._times_gen(f,0)
sage: [next(g) for i in range(5)]
[1, 2, 1, 0, 0]
"""
base_ring = self.parent().base_ring()
zero = base_ring(0)
for n in range(ao):
yield zero
n = ao
while True:
low = self.aorder
high = n - y.aorder
nth_coefficient = zero
#Handle the zero series
if low == inf or high == inf:
yield zero
n += 1
continue
for k in range(low, high+1):
cx = self._stream[k]
if cx == 0:
continue
nth_coefficient += cx * y._stream[n-k]
yield nth_coefficient
n += 1
def __pow__(self, n):
"""
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: f = L([1,1,0]) # 1+x
sage: g = f^3
sage: g.coefficients(4)
[1, 3, 3, 1]
::
sage: f^0
1
"""
if not isinstance(n, (int, Integer)) or n < 0:
raise ValueError("n must be a nonnegative integer")
return prod([self]*n, self.parent().identity_element())
def __invert__(self):
"""
Return 1 over this power series, i.e. invert this power series.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: x = L.gen()
Geometric series::
sage: a = ~(1-x); a.compute_coefficients(10); a
1 + x + x^2 + x^3 + x^4 + x^5 + x^6 + x^7 + x^8 + x^9 + x^10 + O(x^11)
(Shifted) Fibonacci numbers::
sage: b = ~(1-x-x^2); b.compute_coefficients(10); b
1 + x + 2*x^2 + 3*x^3 + 5*x^4 + 8*x^5
+ 13*x^6 + 21*x^7 + 34*x^8 + 55*x^9 + 89*x^10 + O(x^11)
Series whose constant coefficient is `0` cannot be inverted::
sage: ~x
Traceback (most recent call last):
....
ZeroDivisionError: cannot invert x because constant coefficient is 0
"""
if self.get_aorder() > 0:
raise ZeroDivisionError(
'cannot invert {} because '
'constant coefficient is 0'.format(self))
return self._new(self._invert_gen, lambda a: 0, self)
invert = __invert__
def _invert_gen(self, ao):
r"""
Return an iterator for the coefficients of 1 over this power series.
TESTS::
sage: L = LazyPowerSeriesRing(QQ)
sage: f = L([1, -1, 0])
sage: g = f._invert_gen(0)
sage: [next(g) for i in range(10)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
"""
from itertools import count
assert ao == 0
ic0 = ~self.coefficient(0)
yield ic0
if self.order == 0:
return
one = self.parent()(1)
base = one - ic0 * self
base.coefficient(0)
ao_base = base.get_aorder()
assert ao_base >= 1
current = one + base
k = 1
for n in count(1):
while ao_base*k < n:
current = one + base * current
k += 1
current.coefficient(n) # make sure new current is initialized
ao_base = base.get_aorder() # update this so that while above is faster
yield current.coefficient(n) * ic0
def _div_(self, other):
"""
Divide this power series by ``other``.
EXAMPLES::
sage: L = LazyPowerSeriesRing(QQ)
sage: x = L.gen()
Fibonacci numbers::
sage: b = x / (1-x-x^2); b.compute_coefficients(10); b
x + x^2 + 2*x^3 + 3*x^4 + 5*x^5 + 8*x^6
+ 13*x^7 + 21*x^8 + 34*x^9 + 55*x^10 + O(x^11)
"""
import numpy as np
import logging
import six
import loopy as lp
import cantera as ct
from nose.plugins.attrib import attr
from unittest.case import SkipTest
from parameterized import parameterized
try:
from scipy.sparse import csr_matrix, csc_matrix
except ImportError:
csr_matrix = None
csc_matrix = None
from pyjac.core.rate_subs import (
get_concentrations,
get_rop, get_rop_net, get_spec_rates, get_molar_rates, get_thd_body_concs,
get_rxn_pres_mod, get_reduced_pressure_kernel, get_lind_kernel,
get_sri_kernel, get_troe_kernel, get_simple_arrhenius_rates,
polyfit_kernel_gen, get_plog_arrhenius_rates, get_cheb_arrhenius_rates,
get_rev_rates, get_temperature_rate, get_extra_var_rates)
from pyjac.loopy_utils.loopy_utils import (
loopy_options, kernel_call, set_adept_editor, populate, get_target)
from pyjac.core.enum_types import RateSpecialization, FiniteDifferenceMode
from pyjac.core.create_jacobian import (
dRopi_dnj, dci_thd_dnj, dci_lind_dnj, dci_sri_dnj, dci_troe_dnj,
total_specific_energy, dTdot_dnj, dEdot_dnj, thermo_temperature_derivative,
dRopidT, dRopi_plog_dT, dRopi_cheb_dT, dTdotdT, dci_thd_dT, dci_lind_dT,
dci_troe_dT, dci_sri_dT, dEdotdT, dTdotdE, dEdotdE, dRopidE, dRopi_plog_dE,
dRopi_cheb_dE, dci_thd_dE, dci_lind_dE, dci_troe_dE, dci_sri_dE,
determine_jac_inds, reset_arrays, get_jacobian_kernel,
finite_difference_jacobian)
from pyjac.core import array_creator as arc
from pyjac.core.enum_types import reaction_type, falloff_form
from pyjac.kernel_utils import kernel_gen as k_gen
from pyjac.tests import get_test_langs, TestClass
from pyjac.tests.test_utils import (
kernel_runner, get_comparable, _generic_tester,
_full_kernel_test, with_check_inds, inNd, skipif, xfail)
from pyjac.core.enum_types import KernelType
from pyjac import utils
class editor(object):
def __init__(self, independent, dependent,
problem_size, order, do_not_set=[],
skip_on_missing=None):
def __replace_problem_size(shape):
new_shape = []
for x in shape:
if x != arc.problem_size.name:
new_shape.append(x)
else:
new_shape.append(problem_size)
return tuple(new_shape)
assert len(independent.shape) == 2
self.independent = independent.copy(shape=__replace_problem_size(
independent.shape))
indep_size = independent.shape[1]
assert len(dependent.shape) == 2
self.dependent = dependent.copy(shape=__replace_problem_size(
dependent.shape))
dep_size = dependent.shape[1]
self.problem_size = problem_size
# create the jacobian
self.output = arc.creator('jac', np.float64,
(problem_size, dep_size, indep_size),
order=order)
self.output = self.output(*['i', 'j', 'k'])[0]
self.do_not_set = utils.listify(do_not_set)
self.skip_on_missing = skip_on_missing
def set_single_kernel(self, single_kernel):
"""
It's far easier to use two generated kernels, one that uses the full
problem size (for calling via loopy), and another that uses a problem
size of 1, to work with Adept indexing in the AD kernel
"""
self.single_kernel = single_kernel
def set_skip_on_missing(self, func):
"""
If set, skip if the :class:`kernel_info` returned by this function
is None
"""
self.skip_on_missing = func
def __call__(self, knl):
return set_adept_editor(knl, self.single_kernel, self.problem_size,
self.independent, self.dependent, self.output,
self.do_not_set)
# various convenience wrappers
def _get_fall_call_wrapper():
def fall_wrapper(loopy_opts, namestore, test_size):
return get_simple_arrhenius_rates(loopy_opts, namestore,
test_size, falloff=True)
return fall_wrapper
def _get_plog_call_wrapper(rate_info):
def plog_wrapper(loopy_opts, namestore, test_size):
if rate_info['plog']['num']:
return get_plog_arrhenius_rates(loopy_opts, namestore,
rate_info['plog']['max_P'],
test_size)
return plog_wrapper
def _get_cheb_call_wrapper(rate_info):
def cheb_wrapper(loopy_opts, namestore, test_size):
if rate_info['cheb']['num']:
return get_cheb_arrhenius_rates(loopy_opts, namestore,
np.max(rate_info['cheb']['num_P']),
np.max(rate_info['cheb']['num_T']),
test_size)
return cheb_wrapper
def _get_poly_wrapper(name, conp):
def poly_wrapper(loopy_opts, namestore, test_size):
return polyfit_kernel_gen(name, loopy_opts, namestore, test_size)
return poly_wrapper
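# Example (sketch): these small factories exist so that kernel-construction
# callables with a uniform (loopy_opts, namestore, test_size) signature can be
# passed around and chained in `extra_funcs`; for instance, a specific-heat
# wrapper might be built and invoked as below (the surrounding objects are
# assumed to come from the caller).
#
#   cp_wrapper = _get_poly_wrapper('cp', conp=True)
#   cp_info = cp_wrapper(loopy_opts, namestore, test_size)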
def _get_ad_jacobian(self, test_size, conp=True, pregen=None, return_kernel=False):
"""
    Convenience method to evaluate the autodifferentiated (AD) Jacobian from a
    given Phi / parameter set
Parameters
----------
test_size: int
The number of conditions to test
conp: bool
If True, CONP else CONV
pregen: Callable [None]
If not None, this corresponds to a previously generated AD-Jacobian kernel
Used in the validation tester to speed up chunked Jacobian evaluation
return_kernel: bool [False]
If True, we want __get_jacobian to return the kernel and kernel call
rather than the evaluated array (to be used with :param:`pregen`)
"""
class create_arr(object):
def __init__(self, dim):
self.dim = dim
@classmethod
def new(cls, inds):
if isinstance(inds, np.ndarray):
dim = inds.size
elif isinstance(inds, list):
dim = len(inds)
elif isinstance(inds, arc.creator):
dim = inds.initializer.size
elif isinstance(inds, int):
dim = inds
else:
return None
return cls(dim)
def __call__(self, order):
return np.zeros((test_size, self.dim), order=order)
# get rate info
rate_info = determine_jac_inds(
self.store.reacs, self.store.specs, RateSpecialization.fixed)
# create loopy options
# --> have to turn off the temperature guard to avoid fmin / max issues with
# Adept
ad_opts = loopy_options(order='C', lang='c', auto_diff=True)
# create namestore
store = arc.NameStore(ad_opts, rate_info, conp, test_size)
# and the editor
edit = editor(store.n_arr, store.n_dot, test_size,
order=ad_opts.order)
# setup args
phi = self.store.phi_cp if conp else self.store.phi_cv
allint = {'net': rate_info['net']['allint']}
args = {
'phi': lambda x: np.array(phi, order=x, copy=True),
'jac': lambda x: np.zeros((test_size,) + store.jac.shape[1:], order=x),
'wdot': create_arr.new(store.num_specs),
'Atroe': create_arr.new(store.num_troe),
'Btroe': create_arr.new(store.num_troe),
'Fcent': create_arr.new(store.num_troe),
'Fi': create_arr.new(store.num_fall),
'Pr': create_arr.new(store.num_fall),
'X': create_arr.new(store.num_sri),
'conc': create_arr.new(store.num_specs),
'dphi': lambda x: np.zeros_like(phi, order=x),
'kf': create_arr.new(store.num_reacs),
'kf_fall': create_arr.new(store.num_fall),
'kr': create_arr.new(store.num_rev_reacs),
'pres_mod': create_arr.new(store.num_thd),
'rop_fwd': create_arr.new(store.num_reacs),
'rop_rev': create_arr.new(store.num_rev_reacs),
'rop_net': create_arr.new(store.num_reacs),
'thd_conc': create_arr.new(store.num_thd),
'b': create_arr.new(store.num_specs),
'Kc': create_arr.new(store.num_rev_reacs)
}
if conp:
args['P_arr'] = lambda x: np.array(self.store.P, order=x, copy=True)
args['h'] = create_arr.new(store.num_specs)
args['cp'] = create_arr.new(store.num_specs)
else:
args['V_arr'] = lambda x: np.array(self.store.V, order=x, copy=True)
args['u'] = create_arr.new(store.num_specs)
args['cv'] = create_arr.new(store.num_specs)
# trim unused args
args = {k: v for k, v in six.iteritems(args) if v is not None}
# obtain the finite difference jacobian
kc = kernel_call('dnkdnj', [None], **args)
# check for pregenerated kernel
if pregen is not None:
return pregen(kc)
__b_call_wrapper = _get_poly_wrapper('b', conp)
__cp_call_wrapper = _get_poly_wrapper('cp', conp)
__cv_call_wrapper = _get_poly_wrapper('cv', conp)
__h_call_wrapper = _get_poly_wrapper('h', conp)
__u_call_wrapper = _get_poly_wrapper('u', conp)
def __extra_call_wrapper(loopy_opts, namestore, test_size):
return get_extra_var_rates(loopy_opts, namestore,
conp=conp, test_size=test_size)
def __temperature_wrapper(loopy_opts, namestore, test_size):
return get_temperature_rate(loopy_opts, namestore,
conp=conp, test_size=test_size)
return _get_jacobian(
self, __extra_call_wrapper, kc, edit, ad_opts, conp,
extra_funcs=[get_concentrations, get_simple_arrhenius_rates,
_get_plog_call_wrapper(rate_info),
_get_cheb_call_wrapper(rate_info),
get_thd_body_concs, _get_fall_call_wrapper(),
get_reduced_pressure_kernel, get_lind_kernel,
get_sri_kernel, get_troe_kernel,
__b_call_wrapper, get_rev_rates,
get_rxn_pres_mod, get_rop, get_rop_net,
get_spec_rates] + (
[__h_call_wrapper, __cp_call_wrapper] if conp else
[__u_call_wrapper, __cv_call_wrapper]) + [
get_molar_rates, __temperature_wrapper],
allint=allint, return_kernel=return_kernel)
def _make_array(self, array):
"""
Creates an array for comparison to an autorun kernel from the result
of __get_jacobian
Parameters
----------
array : :class:`numpy.ndarray`
The input Jacobian array
Returns
-------
reshaped : :class:`numpy.ndarray`
The reshaped / reordered array for comparison to the autorun
kernel
"""
for i in range(array.shape[0]):
# reshape inner array
array[i, :, :] = np.reshape(array[i, :, :].flatten(order='K'),
array.shape[1:],
order='F')
return array
def _get_jacobian(self, func, kernel_call, editor, ad_opts, conp, extra_funcs=[],
return_kernel=False, **kwargs):
"""
Computes an autodifferentiated kernel, exposed to external classes in order
to share with the :mod:`functional_tester`
Parameters
----------
func: Callable
The function to autodifferentiate
kernel_call: :class:`kernel_call`
        The kernel call with arguments, etc. to use
editor: :class:`editor`
The jacobian editor responsible for creating the AD kernel
ad_opts: :class:`loopy_options`
The AD enabled loopy options object
extra_funcs: list of Callable
Additional functions that must be called before :param:`func`.
These can be used to chain together functions to find derivatives of
complicated values (e.g. ROP)
return_kernel: bool [False]
        If True, return a callable function that takes as an argument the
        new kernel_call w/ updated args and returns the result
        Note: The user is responsible for checking that the arguments are of
        valid shape
    kwargs: dict
        Additional args for :param:`func`
Returns
-------
ad_jac : :class:`numpy.ndarray`
        The resulting autodifferentiated Jacobian, whose shape depends on
        the values specified in the editor
"""
# find rate info
rate_info = determine_jac_inds(
self.store.reacs,
self.store.specs,
ad_opts.rate_spec)
# create namestore
namestore = arc.NameStore(ad_opts, rate_info, conp,
self.store.test_size)
# get kw args this function expects
def __get_arg_dict(check, **in_args):
try:
# py2-3 compat
arg_count = check.func_code.co_argcount
args = check.func_code.co_varnames[:arg_count]
except AttributeError:
arg_count = check.__code__.co_argcount
args = check.__code__.co_varnames[:arg_count]
args_dict = {}
for k, v in six.iteritems(in_args):
if k in args:
args_dict[k] = v
return args_dict
# create the kernel info
infos = []
info = func(ad_opts, namestore,
test_size=self.store.test_size,
**__get_arg_dict(func, **kwargs))
infos.extend(utils.listify(info))
# create a dummy kernel generator
knl = k_gen.make_kernel_generator(
kernel_type=KernelType.jacobian,
loopy_opts=ad_opts,
kernels=infos,
namestore=namestore,
test_size=self.store.test_size,
extra_kernel_data=[editor.output]
)
knl._make_kernels()
# get list of current args
have_match = kernel_call.strict_name_match
new_args = []
new_kernels = []
for k in knl.kernels:
if have_match and kernel_call.name != k.name:
continue
new_kernels.append(k)
for arg in k.args:
if arg not in new_args and not isinstance(
arg, lp.TemporaryVariable):
new_args.append(arg)
knl = new_kernels[:]
# generate dependencies with full test size to get extra args
def __raise(f):
raise SkipTest('Mechanism {} does not contain derivatives corresponding to '
'{}'.format(self.store.gas.name, f.__name__))
infos = []
for f in extra_funcs:
info = f(ad_opts, namestore,
test_size=self.store.test_size,
**__get_arg_dict(f, **kwargs))
is_skip = editor.skip_on_missing is not None and \
f == editor.skip_on_missing
if is_skip and any(x is None for x in utils.listify(info)):
# empty map (e.g. no PLOG)
__raise(f)
infos.extend([x for x in utils.listify(info) if x is not None])
for i in infos:
for arg in i.kernel_data:
if arg not in new_args and not isinstance(
arg, lp.TemporaryVariable):
new_args.append(arg)
for i in range(len(knl)):
knl[i] = knl[i].copy(args=new_args[:])
# and a generator for the single kernel
single_name = arc.NameStore(ad_opts, rate_info, conp, 1)
single_info = []
for f in extra_funcs + [func]:
info = f(ad_opts, single_name,
test_size=1,
**__get_arg_dict(f, **kwargs))
for i in utils.listify(info):
if f == func and have_match and kernel_call.name != i.name:
continue
if i is None:
# empty map (e.g. no PLOG)
continue
single_info.append(i)
single_knl = k_gen.make_kernel_generator(
kernel_type=KernelType.species_rates,
| |
# Import kivy tools
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.checkbox import CheckBox
from kivy.uix.spinner import Spinner
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
from kivy.properties import BooleanProperty, ObjectProperty
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.lang import Builder
# Import the kv files
Builder.load_file('./src/rv.kv')
Builder.load_file('./src/screenhome.kv')
Builder.load_file('./src/screenprofile.kv')
Builder.load_file('./src/screensettings.kv')
Builder.load_file('./src/screenproduct.kv')
Builder.load_file('./src/screenquantities.kv')
Builder.load_file('./src/screenfinal.kv')
Builder.load_file('./src/manager.kv')
# Other imports
import pandas as pd
import re
from Algo_main import algo # Import the algorithm for NutriScore computation
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
''' Add selection and focus behaviour to the view '''
pass
class SelectableGrid(RecycleDataViewBehavior, GridLayout):
''' Add selection support to the Label '''
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
self.ids['id_label1'].text = data['label1']['text']
self.ids['id_label2'].text = data['label2']['text']
self.ids['id_label3'].text = data['label3']['text']
return super(SelectableGrid, self).refresh_view_attrs(
rv, index, data)
def on_touch_down(self, touch):
''' Add selection on touch down '''
if super(SelectableGrid, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
''' Respond to the selection of items '''
self.selected = is_selected
class SelectableQuantity(RecycleDataViewBehavior, GridLayout):
''' Add selection support to the Label '''
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
''' Catch and handle the view changes '''
self.index = index
self.ids['id_label1'].text = data['label1']['text']
self.ids['id_label2'].text = data['label2']['text']
self.ids['id_label3'].text = data['label3']['text']
return super(SelectableQuantity, self).refresh_view_attrs(
rv, index, data)
class RV(RecycleView):
''' Class for the RecycleView Controller '''
def __init__(self, **kwargs):
super(RV, self).__init__(**kwargs)
def upload(self, query, active):
''' Search data according to the user input '''
# Reset data
self.data = []
# Check if the Raw Food CheckBox is active or not
if active:
self.parent.parent.getSelection('API', query, True)
self.data = [{'label1': {'text': 'API'}, 'label2': {'text': query}, 'label3': {'text': 'Add/Remove'}}]
else:
isinside = allTrue
for item in query.split(): # Split the query in keywords
isinside = isinside & \
(DF['product_name'].str.contains(item, case=False) | \
DF['Brands'].str.contains(item, case=False))
if any(isinside):
selection = DF[isinside] # Select products to display
for row in selection.itertuples(): # Iterate through the columns of DF
d = {'label1': {'text': str(row[0])}, \
'label2': {'text': str(row[1])},
'label3': {'text': str(row[-1])}} # barcode, product_name, brand
self.data.append(d)
else:
isinside = DF.index.str.contains(query, case=False) # Search for Barcode
if any(isinside):
selection = DF[isinside]
for row in selection.itertuples():
d = {'label1': {'text': str(row[0])}, \
'label2': {'text': str(row[1])},
'label3': {'text': str(row[-1])}} # barcode, product_name, brand
self.data.append(d)
else:
# In case no product is found
self.data = [{'label1': {'text': ''}, \
'label2': {'text': 'No product found'}, 'label3': {'text': ''}}]
def getQuantities(self, dict):
''' Gather data for display on Quantities Screen '''
self.data = []
code = dict['code']
product_name = dict['product_name']
quantity = dict['quantity']
for index in range(len(code)):
d = {'label1': {'text': code[index]}, 'label2': {'text': product_name[index]}, \
'label3': {'text': quantity[index]}}
self.data.append(d)
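# Example (sketch): how the RecycleView controller above is typically driven from
# a search widget; the widget id 'rv' and the query strings are illustrative only.
#
#   self.ids.rv.upload(query='chocolate', active=False)  # keyword search in DF
#   self.ids.rv.upload(query='banana', active=True)      # raw food via the API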
class ScreenHome(Screen):
''' Class for the Home Screen. No variables or functions needed for this screen '''
pass
class ScreenProfile(Screen):
''' Class for the Profile Screen '''
def updateDF(self):
global DF
DF = pd.read_csv('https://drive.google.com/uc?export=download&id=1aLUh1UoQcS9lBa6oVRln-DuskxK5uK3y', \
index_col=[0], low_memory = False)
DF.to_csv('./data/OpenFoodFacts_final.csv.gz', compression='gzip')
self.ids['update'].text = 'Updated'
self.ids['update'].background_color = (0,1,0,1)
def update(self):
self.ids['update'].text = 'Updating'
self.ids['update'].background_color = (50/255,164/255,206/255,1)
class ScreenSettings(Screen):
''' Class for the Settings Screen '''
settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \
'email': '', 'activity': 0, 'days': 0}
id_profile = -999
def resetForm(self):
''' Reset the indicators of invalid input '''
self.ids.sex.color = (1,1,1,1)
self.ids.activity.color = (1,1,1,1)
self.ids.age.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.weight.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.days.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.email.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.name.hint_text_color = (0.5, 0.5, 0.5, 1.0)
self.ids.surname.hint_text_color = (0.5, 0.5, 0.5, 1.0)
def setForm(self, id_profile):
self.id_profile = id_profile
self.settings = {'rec': True,'name': '', 'surname': '', 'age': 0, 'sex': True, 'weight': 0, \
'email': '', 'activity': 0, 'days': 0}
if int(self.id_profile) >= 0:
self.ids.name.text = str(profile_list.iloc[self.id_profile]['name'])
self.ids.surname.text= str(profile_list.iloc[self.id_profile]['surname'])
self.ids.age.text = str(profile_list.iloc[self.id_profile]['age'])
if bool(profile_list.iloc[self.id_profile]['sex']):
self.ids.male.active = True
self.ids.female.active = False
else:
self.ids.male.active = False
self.ids.female.active = True
self.ids.weight.text = str(profile_list.iloc[self.id_profile]['weight'])
self.ids.email.text = str(profile_list.iloc[self.id_profile]['email'])
self.ids.days.text = str(profile_list.iloc[self.id_profile]['days'])
if int(profile_list.iloc[self.id_profile]['activity']) == 1.8:
self.ids.seated.active = False
self.ids.both.active = False
self.ids.standing.active = True
elif int(profile_list.iloc[self.id_profile]['activity']) == 1.6:
self.ids.seated.active = False
self.ids.both.active = True
self.ids.standing.active = False
else:
self.ids.seated.active = True
self.ids.both.active = False
self.ids.standing.active = False
elif int(self.id_profile) == -999:
self.ids.name.text = ''
self.ids.surname.text = ''
self.ids.age.text = ''
self.ids.male.active = False
self.ids.female.active = False
self.ids.email.text = ''
self.ids.weight.text = ''
self.ids.seated.active = False
self.ids.both.active = False
self.ids.standing.active = False
self.ids.days.text = ''
else:
self.changeScreen(False)
def changeScreen(self, valid):
''' Handle the validity of the inputs and the change of current screen '''
if valid:
self.resetForm()
# Check name validity
if self.ids.name.text.strip() == '':
self.ids.name.hint_text_color = (1,0,0,1)
return False
# Check surname validity
elif self.ids.surname.text.strip() == '':
self.ids.surname.hint_text_color = (1,0,0,1)
return False
# Check age validity
elif self.ids.age.text.strip() == '' or int(self.ids.age.text) <= 0 or \
int(self.ids.age.text) >= 120:
self.ids.age.text = ''
self.ids.age.hint_text_color = (1,0,0,1)
return False
# Check sex validity
elif not(self.ids.male.active or self.ids.female.active):
self.ids.sex.color = (1,0,0,1)
return False
# Check email validity
elif not re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", self.ids.email.text):
self.ids.email.text = ''
self.ids.email.hint_text_color = (1,0,0,1)
return False
# Check weight validity
elif self.ids.weight.text.strip() == '' or int(self.ids.weight.text) <= 0:
self.ids.weight.text = ''
self.ids.weight.hint_text_color = (1,0,0,1)
return False
# Check activity validity
elif not(self.ids.seated.active or self.ids.both.active or self.ids.standing.active):
self.ids.activity.color = (1,0,0,1)
return False
# Check days validity
elif self.ids.days.text.strip() == '' or int(self.ids.days.text) <= 0:
self.ids.days.text = ''
self.ids.days.hint_text_color = (1,0,0,1)
return False
else: # Validation of the form and reset
self.settings['rec'] = True
self.settings['name'] = self.ids.name.text
self.settings['surname'] = self.ids.surname.text
self.settings['age'] = int(self.ids.age.text)
self.settings['weight'] = int(self.ids.weight.text)
self.settings['email'] = self.ids.email.text
self.settings['days'] = int(self.ids.days.text)
self.settings['sex'] = self.ids.male.active
if self.ids.seated.active:
self.settings['activity'] = 1.4
if self.ids.both.active:
self.settings['activity'] = 1.6
if self.ids.standing.active:
self.settings['activity'] = 1.8
self.resetForm()
else: # If the user pass the settings screen
self.settings['rec'] = False
self.manager.setSettings(self.settings, self.id_profile)
# Change the current screen
self.manager.current = 'Product Screen'
class ScreenProduct(Screen):
''' Class for the Product Screen '''
temp_dict = {'code':'', 'product_name': ''}
def getSelection(self, text1, text2, state):
# Select or deselect temporarly a product
if state:
self.temp_dict['code'] = text1
self.temp_dict['product_name'] = text2
else:
self.temp_dict['code'] = ''
self.temp_dict['product_name'] = ''
class ScreenQuantities(Screen):
''' Class for the Quantities Screen '''
temp_dict = {'code': [], 'product_name': [], 'quantity': [], 'color': []}
def initQuantity(self, data):
''' Initialize the dictionary of the products '''
if self.temp_dict['quantity'] == []:
self.temp_dict = data
self.ids.rv.getQuantities(data)
def updateQuantity(self, index, text1, text2, text3):
''' Store the quantities input by the user '''
l = len(self.temp_dict['quantity'])
if text3 == '' or text3 == '-' or int(text3) < 0:
text3 = '0'
if index < l:
self.temp_dict['code'][index] = text1
self.temp_dict['product_name'][index] = text2
self.temp_dict['quantity'][index] = text3
# Append the list of quantities if needed
else:
temp = ['0' for i in range(index-l)]
self.temp_dict['code'] = self.temp_dict['code'] + temp + [text1]
self.temp_dict['product_name'] = self.temp_dict['product_name'] + temp + [text2]
self.temp_dict['quantity'] = self.temp_dict['quantity'] + temp + [text3]
# Update the data displayed
self.initQuantity(self.temp_dict)
class ScreenFinal(Screen):
''' Class for the Final Screen. No variables or functions needed for this screen '''
pass
class Manager(ScreenManager):
''' Class for the Manager Controller. Store main data '''
selected_products = {'code': [], 'product_name': [], 'quantity': []}
settings = {'Rec': True, 'Name': '', 'Surname': '', 'Email': '', 'Age': 0, 'Sex': True, 'Pal': 0, \
'Weight': 0, 'Day': 0}
def getProfiles(self):
self.ids.screen_profile.ids.profile_spinner.values = \
[str(index + 1) + ' : ' + str(profile_list['name'][index]) + ' ' + str(profile_list['surname'][index]) \
for index in profile_list.index]
def toSettings(self, text):
if text == 'new':
id_profile = -999
elif text == 'pass':
id_profile = -1000
else:
items = text.split()
id_profile = items[0].strip()
id_profile = int(id_profile) - 1
self.ids.screen_settings.setForm(id_profile)
if id_profile != -1000:
self.current = 'Settings Screen'
def addProduct(self):
''' Add product to main storage '''
item1 = self.ids.screen_product.temp_dict['code']
item2 = self.ids.screen_product.temp_dict['product_name']
if item1 != '' and item2 != '':
self.selected_products['code'].append(item1)
self.selected_products['product_name'].append(item2)
self.selected_products['quantity'].append('0')
def deleteProduct(self):
        ''' Remove product from main storage '''
item1 = self.ids.screen_product.temp_dict['code']
item2 | |
import logging
import pandas as pd
from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator
from django.db.models import Count, Prefetch, Q
from django.forms import formset_factory
from django.http import HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from zoo_checks.ingest import TRACKS_REQ_COLS
from .forms import (
AnimalCountForm,
ExportForm,
GroupCountForm,
SpeciesCountForm,
TallyDateForm,
UploadFileForm,
)
from .helpers import (
clean_df,
get_init_anim_count_form,
get_init_group_count_form,
get_init_spec_count_form,
qs_to_df,
set_formset_order,
today_time,
)
from .ingest import ExcelUploadError, handle_upload, ingest_changesets
from .models import (
Animal,
AnimalCount,
Enclosure,
Group,
GroupCount,
Role,
Species,
SpeciesCount,
User,
)
baselogger = logging.getLogger("zootable")
LOGGER = baselogger.getChild(__name__)
""" helpers that need models """
def get_accessible_enclosures(user: User):
# superuser sees all enclosures
if not user.is_superuser:
enclosures = Enclosure.objects.filter(roles__in=user.roles.all()).distinct()
else:
enclosures = Enclosure.objects.all()
return enclosures
def redirect_if_not_permitted(request: HttpRequest, enclosure: Enclosure) -> bool:
"""
Returns
-------
True if user does not belong to enclosure or if not superuser
False if user belongs to enclosure or is superuser
"""
if request.user.is_superuser or request.user.roles.filter(enclosures=enclosure):
return False
messages.error(
request, f"You do not have permissions to access enclosure {enclosure.name}"
)
LOGGER.error(
(
"Insufficient permissions to access enclosure"
f" {enclosure.name}, user: {request.user.username}"
)
)
return True
def enclosure_counts_to_dict(enclosures, animal_counts, group_counts) -> dict:
"""
repackage enclosure counts into dict for template render
dict order of enclosures is same as list/query order
not using defaultdict(list) because django templates have difficulty with them
"""
def create_counts_dict(enclosures, counts) -> dict:
"""Takes a list/queryset of counts and enclosures
Returns a dictionary
- keys: enclosures
- values: list of counts belonging to the enclosure
We do this (once) in order to be able to iterate over counts for each enclosure
"""
counts_dict = {}
for enc in enclosures:
counts_dict[enc] = []
[counts_dict[c.enclosure].append(c) for c in counts]
return counts_dict
def separate_conditions(counts) -> dict:
"""
Arguments: Animal counts
Returns: dictionary
- keys: condition names
- values: list of counts
"""
cond_dict = {}
for cond in AnimalCount.CONDITIONS:
cond_dict[cond[1]] = [] # init to empty list
[cond_dict[c.get_condition_display()].append(c) for c in counts]
return cond_dict
def separate_group_count_attributes(counts) -> dict:
"""
Arguments: Group counts (typically w/in an enclosure)
Returns: dictionary
- keys: Seen, BAR, Needs Attn
- values: sum of group counts within each key
"""
count_dict = {}
count_dict["Seen"] = sum([c.count_seen for c in counts])
count_dict["BAR"] = sum([c.count_bar for c in counts])
count_dict["Needs Attn"] = sum([c.needs_attn for c in counts])
return count_dict
enc_anim_ct_dict = create_counts_dict(enclosures, animal_counts)
enc_group_ct_dict = create_counts_dict(enclosures, group_counts)
counts_dict = {}
for enc in enclosures:
enc_anim_counts_sum = sum(
[
c.condition in [o_c[0] for o_c in AnimalCount.OBSERVED_CONDITIONS]
for c in enc_anim_ct_dict[enc]
]
)
enc_group_counts_sum = sum(
[c.count_seen + c.count_bar for c in enc_group_ct_dict[enc]]
)
total_groups = sum([g.population_total for g in enc.groups.all()])
counts_dict[enc] = {
"animal_count_total": enc_anim_counts_sum,
"animal_conditions": separate_conditions(enc_anim_ct_dict[enc]),
"group_counts": separate_group_count_attributes(enc_group_ct_dict[enc]),
"group_count_total": enc_group_counts_sum,
"total_animals": enc.animals.count(),
"total_groups": total_groups,
}
return counts_dict
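# Illustrative shape of the dict returned above (numbers are hypothetical; the
# "animal_conditions" keys come from the AnimalCount.CONDITIONS display names):
#
#   counts_dict[enclosure] == {
#       "animal_count_total": 12,
#       "animal_conditions": {"Seen": [<AnimalCount>, ...], ...},
#       "group_counts": {"Seen": 40, "BAR": 2, "Needs Attn": 1},
#       "group_count_total": 42,
#       "total_animals": 15,
#       "total_groups": 60,
#   }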
def get_selected_role(request: HttpRequest):
# user requests view all
if request.GET.get("view_all", False):
request.session.pop("selected_role", None)
return
# might have a default selected role in session
# or might be requesting a selected role
else:
# default selected role, gets cleared if you log out
default_role = request.session.get("selected_role", None)
# get role query param (default_role if not found)
role_name = request.GET.get("role", default_role)
if role_name is not None:
try:
request.session["selected_role"] = role_name
return Role.objects.get(slug=role_name)
except ObjectDoesNotExist:
# role probably changed or bad query
messages.info(request, "Selected role not found")
request.session.pop("selected_role", None)
LOGGER.info(f"role not found and removed from session: {role_name}")
return
else:
return
""" views """
@login_required
# TODO: logins may not be sufficient - user a part of a group?
# TODO: add pagination
def home(request: HttpRequest):
enclosures_query = get_accessible_enclosures(request.user)
# only show enclosures that have active animals/groups
query = Q(animals__active=True) | Q(groups__active=True)
selected_role = get_selected_role(request)
if selected_role is not None:
query = query & Q(roles=selected_role)
# prefetching in order to build up the info displayed for each enclosure
groups_prefetch = Prefetch("groups", queryset=Group.objects.filter(active=True))
animals_prefetch = Prefetch("animals", queryset=Animal.objects.filter(active=True))
enclosures_query = (
enclosures_query.prefetch_related(groups_prefetch, animals_prefetch)
.filter(query)
.distinct()
)
paginator = Paginator(enclosures_query, 10)
page = request.GET.get("page", 1)
enclosures = paginator.get_page(page)
page_range = range(
max(int(page) - 5, 1), min(int(page) + 5, paginator.num_pages) + 1
)
roles = request.user.roles.all()
cts = Enclosure.all_counts(enclosures)
enclosure_cts_dict = enclosure_counts_to_dict(enclosures, *cts)
return render(
request,
"home.html",
{
"enclosures": enclosures,
"cts_dict": enclosure_cts_dict,
"page_range": page_range,
"roles": roles,
"selected_role": selected_role,
},
)
@login_required
def count(request: HttpRequest, enclosure_slug, year=None, month=None, day=None):
enclosure = get_object_or_404(Enclosure, slug=enclosure_slug)
if redirect_if_not_permitted(request, enclosure):
return redirect("home")
if None in [year, month, day]:
dateday = today_time()
else:
dateday = timezone.make_aware(timezone.datetime(year, month, day))
if dateday.date() == today_time().date():
count_today = True
else:
count_today = False
enclosure_animals = (
enclosure.animals.filter(active=True)
.order_by("species__common_name", "name", "accession_number")
.select_related("species")
)
enclosure_groups = (
enclosure.groups.filter(active=True)
.order_by("species__common_name", "accession_number")
.select_related("species")
)
enclosure_species = enclosure.species().order_by("common_name")
SpeciesCountFormset = formset_factory(SpeciesCountForm, extra=0)
GroupCountFormset = formset_factory(GroupCountForm, extra=0)
AnimalCountFormset = formset_factory(AnimalCountForm, extra=0)
species_counts_on_day = SpeciesCount.counts_on_day(
enclosure_species, enclosure, day=dateday
)
init_spec = get_init_spec_count_form(
enclosure, enclosure_species, species_counts_on_day
)
group_counts_on_day = GroupCount.counts_on_day(enclosure_groups, day=dateday)
init_group = get_init_group_count_form(enclosure_groups, group_counts_on_day)
animal_counts_on_day = AnimalCount.counts_on_day(enclosure_animals, day=dateday)
init_anim = get_init_anim_count_form(enclosure_animals, animal_counts_on_day)
# if this is a POST request we need to process the form data
if request.method == "POST":
# create a form instance and populate it with data from the request:
species_formset = SpeciesCountFormset(
request.POST, initial=init_spec, prefix="species_formset"
)
groups_formset = GroupCountFormset(
request.POST, initial=init_group, prefix="groups_formset"
)
# TODO: Test to make sure we are editing the correct animal counts
animals_formset = AnimalCountFormset(
request.POST,
initial=init_anim,
prefix="animals_formset",
)
# check whether it's valid:
if (
species_formset.is_valid()
and animals_formset.is_valid()
and groups_formset.is_valid()
):
def save_form_in_formset(form):
# TODO: move this into model/(form?) and overwrite the save method
if form.has_changed():
instance = form.save(commit=False)
instance.user = request.user
# if setting count for a diff day than today, set the date/datetime
if not count_today:
instance.datetimecounted = (
dateday
+ timezone.timedelta(days=1)
- timezone.timedelta(seconds=1)
)
instance.datecounted = dateday.date()
instance.update_or_create_from_form()
# process the data in form.cleaned_data as required
for formset in (species_formset, animals_formset, groups_formset):
for form in formset:
save_form_in_formset(form)
messages.success(request, "Saved")
LOGGER.info("Saved counts")
return redirect(
"count",
enclosure_slug=enclosure.slug,
year=dateday.year,
month=dateday.month,
day=dateday.day,
)
else:
(
formset_order,
species_formset,
groups_formset,
animals_formset,
) = set_formset_order(
enclosure,
enclosure_species,
enclosure_groups,
enclosure_animals,
species_formset,
groups_formset,
animals_formset,
dateday,
)
messages.error(request, "There was an error processing the form")
LOGGER.error("Error in processing the form")
# if a GET (or any other method) we'll create a blank form
else:
species_formset = SpeciesCountFormset(
initial=init_spec, prefix="species_formset"
)
groups_formset = GroupCountFormset(initial=init_group, prefix="groups_formset")
animals_formset = AnimalCountFormset(
initial=init_anim,
prefix="animals_formset",
)
(
formset_order,
species_formset,
groups_formset,
animals_formset,
) = set_formset_order(
enclosure,
enclosure_species,
enclosure_groups,
enclosure_animals,
species_formset,
groups_formset,
animals_formset,
dateday,
)
dateform = TallyDateForm()
return render(
request,
"tally.html",
{
"dateday": dateday.date(),
"enclosure": enclosure,
"species_formset": species_formset,
"groups_formset": groups_formset,
"animals_formset": animals_formset,
"formset_order": formset_order,
"dateform": dateform,
"conditions": AnimalCount.CONDITIONS,
},
)
@login_required
def tally_date_handler(request: HttpRequest, enclosure_slug):
"""Called from tally page to change date tally"""
# if it's a POST: pull out the date from the cleaned data then send it to "count"
if request.method == "POST":
form = TallyDateForm(request.POST)
if form.is_valid():
target_date = form.cleaned_data["tally_date"]
return redirect(
"count",
enclosure_slug=enclosure_slug,
year=target_date.year,
month=target_date.month,
day=target_date.day,
)
else:
messages.error(request, "Error in date entered")
LOGGER.error("Error in date entered")
# if it's a GET: just redirect back to count method
return redirect("count", enclosure_slug=enclosure_slug)
@login_required
def edit_species_count(
request: HttpRequest, species_slug, enclosure_slug, year, month, day
):
species = get_object_or_404(Species, slug=species_slug)
enclosure = get_object_or_404(Enclosure, slug=enclosure_slug)
if redirect_if_not_permitted(request, enclosure):
return redirect("home")
dateday = timezone.make_aware(timezone.datetime(year, month, day))
count = species.count_on_day(enclosure, day=dateday)
init_form = {
"count": 0 if count is None else count.count,
"species": species,
"enclosure": enclosure,
}
if request.method == "POST":
form = SpeciesCountForm(request.POST, init_form)
if form.is_valid():
# save the data
if form.has_changed():
obj = form.save(commit=False)
obj.user = request.user
                # force insert because otherwise it always updates
obj.id = None
if dateday.date() == timezone.localdate():
obj.datetimecounted = timezone.localtime()
else:
obj.datetimecounted = (
dateday
+ timezone.timedelta(days=1)
- timezone.timedelta(seconds=1)
)
obj.datecounted = dateday
obj.update_or_create_from_form()
return redirect("count", enclosure_slug=enclosure.slug)
else:
form = SpeciesCountForm(initial=init_form)
return render(
request,
"edit_species_count.html",
{
"form": form,
"count": count,
"species": species,
"enclosure": enclosure,
"dateday": dateday,
},
)
@login_required
def edit_group_count(request: HttpRequest, group, year, month, day):
group = get_object_or_404(
Group.objects.select_related("enclosure", "species"), accession_number=group
)
enclosure = group.enclosure
if redirect_if_not_permitted(request, enclosure):
return redirect("home")
dateday = timezone.make_aware(timezone.datetime(year, month, day))
count = group.count_on_day(day=dateday)
init_form = {
"count_seen": 0 if count is None else count.count_seen,
"count_bar": 0 if count is None else count.count_bar,
"comment": "" if count is None else count.comment,
"count_total": group.population_total,
"group": group,
| |
"NULL", ""]
try:
schNo = getattr(Parcel,schDistNoField)
schNa = getattr(Parcel,schDistField)
pinToTest = getattr(Parcel,pinField)
year = getattr(Parcel,yearField)
if schNo is not None and schNa is not None:
'''schNa = schNa.replace("SCHOOL DISTRICT", "").replace("SCHOOL DISTIRCT", "").replace("SCHOOL DIST","").replace("SCHOOL DIST.", "").replace("SCH DIST", "").replace("SCHOOL", "").replace("SCH D OF", "").replace("SCH", "").replace("SD", "").strip()'''
try:
if schNo != schNameNoDict[schNa] or schNa != schNoNameDict[schNo]:
getattr(Parcel,errorType + "Errors").append("The values provided in " + schDistNoField.upper() + " and " + schDistField.upper() + " field do not match. Please verify values are in acceptable domain list.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['schoolDist'] += 1
except:
getattr(Parcel,errorType + "Errors").append("One or both of the values in the SCHOOLDISTNO field or SCHOOLDIST field are not in the acceptable domain list. Please verify values.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error,Parcel)
if schNo is None and schNa is not None:
'''schNa = schNa.replace("SCHOOL DISTRICT", "").replace("SCHOOL DISTIRCT", "").replace("SCHOOL DIST","").replace("SCHOOL DIST.", "").replace("SCH DIST", "").replace("SCHOOL", "").replace("SCH D OF", "").replace("SCH", "").replace("SD", "").strip()'''
if schNa not in schNameNoDict:
getattr(Parcel,errorType + "Errors").append("The value provided in " + schDistField.upper() + " is not within the acceptable domain list. Please verify value.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['schoolDist'] += 1
if schNa is None and schNo is not None:
if schNo not in schNoNameDict or len(schNo) != 4:
getattr(Parcel,errorType + "Errors").append("The value provided in " + schDistNoField.upper() + " is not within the acceptable domain list or is not 4 digits long as expected. Please verify value.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['schoolDist'] += 1
if schNo is None and schNa is None and pinToTest not in ignoreList and pinToTest is not None and (year is not None and int(year) <= 2018):
getattr(Parcel,errorType + "Errors").append("Both the " + schDistNoField.upper() + " & the " + schDistField.upper() + " are <Null> and a value is expected.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['schoolDist'] += 1
return (Error,Parcel)
except:
getattr(Parcel,errorType + "Errors").append("An unknown issue occurred with the " + schDistField.upper() + " or " + schDistNoField.upper() + " field. Please inspect the values of these fields.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error, Parcel)
def fieldCompleteness(Error,Parcel,fieldList,passList,CompDict):
for field in fieldList:
if field.upper() in passList:
pass
else:
stringToTest = getattr(Parcel,field.lower())
if stringToTest is None:
pass
else:
if stringToTest is not None or stringToTest != '':
CompDict[field] = CompDict[field]+1
return(Error,Parcel)
def fieldCompletenessComparison(Error,fieldList,passList,currentStatDict,previousStatDict):
for field in fieldList:
if field.upper() in passList:
pass
else:
if previousStatDict[field] > 0:
Error.comparisonDict[field] = round((100*(currentStatDict[field] - previousStatDict[field])/ previousStatDict[field]),2)
elif previousStatDict[field] == 0 and currentStatDict[field] == 0 :
Error.comparisonDict[field] = 0.0
elif previousStatDict[field] == 0 :
Error.comparisonDict[field] = 100.0
#Error.comparisonDict[field] = round((100*(currentStatDict[field] - previousStatDict[field])/(Error.recordTotalCount)),2)
return(Error)
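# Worked example (hypothetical numbers) of the percent-change computation above:
# if a field was populated 80 times in the previous submission and 90 times in the
# current one, comparisonDict[field] = round(100 * (90 - 80) / 80, 2) = 12.5.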
#checkSchemaFunction
def checkSchema(Error,inFc,schemaType,fieldPassLst):
fieldList = arcpy.ListFields(inFc)
realFieldList = []
fieldDictNames = {}
incorrectFields = []
excessFields = []
missingFields = []
var = True
arcpy.AddMessage("Checking for all appropriate fields in " + str(inFc) + "...")
for field in fieldList:
fieldDictNames[field.name] = [[field.type],[field.length]]
#if error fields already exist, delete them
for field in fieldList:
if field.name == 'GeneralElementErrors':
arcpy.DeleteField_management(inFc, ['GeneralElementErrors','AddressElementErrors','TaxrollElementErrors','GeometricElementErrors'])
for field in fieldDictNames:
if field.upper() not in fieldPassLst:
if field not in schemaType.keys():
excessFields.append(field)
var = False
elif fieldDictNames[field][0][0] not in schemaType[field][0] or fieldDictNames[field][1][0] not in schemaType[field][1]:
incorrectFields.append(field)
var = False
else:
missingFields = [i for i in schemaType.keys() if i not in fieldDictNames.keys()]
if len(missingFields) > 0:
var = False
if var == False:
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage(" IMMEDIATE ERROR REQUIRING ATTENTION")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage("CERTAIN FIELDS DO NOT MEET THE PARCEL SCHEMA REQUIREMENTS.\n")
if len(incorrectFields) > 0:
arcpy.AddMessage("THE PROBLEMATIC FIELDS INCLUDE: (" + str(incorrectFields).strip("[").strip("]").replace('u','') + ")\n")
arcpy.AddMessage("------->> PLEASE CHECK TO MAKE SURE THE NAMES, DATA TYPES, AND LENGTHS MATCH THE SCHEMA REQUIREMENTS.\n")
if len(excessFields) > 0:
arcpy.AddMessage("THE EXCESS FIELDS INCLUDE: (" + str(excessFields).strip("[").strip("]").replace('u','') + ")\n")
arcpy.AddMessage("------->> PLEASE REMOVED FIELDS THAT ARE NOT IN THE PARCEL ATTRIBUTE SCHEMA.\n")
if len(missingFields) > 0:
arcpy.AddMessage("THE MISSING FIELDS INCLUDE: (" + str(missingFields).strip("[").strip("]").replace('u','') + ")\n")
arcpy.AddMessage("------->> PLEASE ADD FIELDS THAT ARE NOT IN THE PARCEL ATTRIBUTE SCHEMA.\n")
arcpy.AddMessage("PLEASE MAKE NEEDED ALTERATIONS TO THESE FIELDS AND RUN THE TOOL AGAIN.\n")
arcpy.AddMessage("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
exit()
#check for valid postal address
# Error.postalCheck(totError,currParcel,'pstladress','general',pinSkips,'taxrollyear','parcelid',badPstladdSet)
def postalCheck (Error,Parcel,PostalAd,errorType,ignoreList,taxYear,pinField,badPstladdSet, acceptYears):
nullList = ["<Null>", "<NULL>", "NULL", ""]
try:
address = getattr(Parcel,PostalAd)
year = getattr(Parcel, taxYear)
pinToTest = getattr(Parcel,pinField)
if address is None:
pass
else:
if year is not None:
if int(year) <= int(acceptYears[1]): #or pinToTest in ignorelist:
if ('UNAVAILABLE' in address or 'ADDRESS' in address or 'ADDDRESS' in address or 'UNKNOWN' in address or ' 00000' in address or 'NULL' in address or ('NONE' in address and 'HONONEGAH' not in address) or 'MAIL EXEMPT' in address or 'TAX EX' in address or 'UNASSIGNED' in address or 'N/A' in address) or(address in badPstladdSet) or any(x.islower() for x in address):
getattr(Parcel,errorType + "Errors").append("A value provided in the " + PostalAd.upper() + " field may contain an incomplete address. Please verify the value is correct or set to <Null> if complete address is unknown.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['postalCheck'] += 1
elif address in nullList or address.isspace():
Error.flags_dict['postalCheck'] += 1
Error.badcharsCount += 1 #for wrong <null> values
else:
pass
return(Error,Parcel)
except:
getattr(Parcel,errorType + "Errors").append("An unknown issue occurred with the PSTLADRESS field. Please inspect the value of this field.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error, Parcel)
#totError = Error.checkBadChars (totError )
def checkBadChars(Error ):
if Error.badcharsCount >= 100:
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage("THERE ARE AT LEAST 100 INSTANCES OF THE STRINGS '<Null>', \'NULL\', BLANKS AND/OR LOWER CASE CHARACTERS WITHIN THE ATTRIBUTE TABLE. \n")
arcpy.AddMessage("RUN THE \"NULL FIELDS AND SET THE UPPERCASE TOOL\" AVAILABLE HERE: https://www.sco.wisc.edu/parcels/tools \n")
arcpy.AddMessage("ONCE COMPLETE, RUN VALIDATION TOOL AGAIN.\n")
arcpy.AddMessage("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
exit()
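# CNTASSDVALUE is expected to equal LNDVALUE + IMPVALUE; missing values are treated as 0.0.
# Hypothetical example: LNDVALUE = 50000 and IMPVALUE = 150000 should give CNTASSDVALUE = 200000,
# and any other total is flagged below.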
def totalAssdValueCheck(Error,Parcel,cnt,lnd,imp,errorType):
try:
cnt = 0.0 if (getattr(Parcel,cnt) is None) else float(getattr(Parcel,cnt))
lnd = 0.0 if (getattr(Parcel,lnd) is None) else float(getattr(Parcel,lnd))
imp = 0.0 if (getattr(Parcel,imp) is None) else float(getattr(Parcel,imp))
if lnd + imp != cnt:
getattr(Parcel,errorType + "Errors").append("CNTASSDVALUE is not equal to LNDVALUE + IMPVALUE as expected. Correct this issue and refer to the submission documentation for further clarification as needed.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['cntCheck'] += 1
return(Error,Parcel)
except:
getattr(Parcel,errorType + "Errors").append("An unknown issue occurred when comparing your CNTASSDVALUE value to the sum of LNDVALUE and IMPVALUE. Please inspect these fields.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error, Parcel)
# parcels with MFLValue should have auxclass of W1-W3 or W5-W9
def mfLValueCheck(Error, Parcel, mflvalue, auxField, errorType):
try:
mflValueTest = getattr(Parcel,mflvalue)
auxToTest = getattr(Parcel,auxField)
if mflValueTest is None or float(mflValueTest) == 0.0:
if auxToTest is not None and re.search('W', auxToTest) is not None and re.search('AW', auxToTest) is None and re.search('W4', auxToTest) is None:
getattr(Parcel, errorType + "Errors").append("A <null> value provided in MFLVALUE field does not match the (" + str(auxToTest) + ") AUXCLASS value(s). Refer to submission documentation for verification.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['mflvalueCheck'] += 1
elif mflValueTest is not None and float(mflValueTest) > 0.0:
if auxToTest is None:
getattr(Parcel, errorType + "Errors").append("A <Null> value is expected in the MFLVALUE field according to the AUXCLASS field. Please verify.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['mflvalueCheck'] += 1
elif re.search('W4', auxToTest) is not None:
getattr(Parcel, errorType + "Errors").append("MFLVALUE does not include properties with AUXCLASS value of W4. Please verify.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['mflvalueCheck'] += 1
else:
pass
return(Error, Parcel)
except:
getattr(Parcel,errorType + "Errors").append("An unknown issue occurred with the MFLVALUE field. Please inspect the value of field.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error, Parcel)
def mflLndValueCheck(Error,Parcel,parcelidfield, parcelidList,lnd,mfl,errorType):
try:
lnd = 0.0 if (getattr(Parcel,lnd) is None) else float(getattr(Parcel,lnd))
mfl = 0.0 if (getattr(Parcel,mfl) is None) else float(getattr(Parcel,mfl))
parcelid = getattr(Parcel, parcelidfield)
if lnd == mfl and (lnd != 0.0 and mfl != 0.0):
Error.mflLnd += 1
if Error.mflLnd <= 10:
parcelidList.append (parcelid) # need to save parcelid to add flag if necessary
if Error.mflLnd > 10:
getattr(Parcel,errorType + "Errors").append("MFLVALUE should not equal LNDVALUE in most cases. Please correct this issue and refer to the submission documentation for further clarification as needed.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
Error.flags_dict['mflvalueCheck'] += 1
return(Error,Parcel)
except:
getattr(Parcel,errorType + "Errors").append("An unknown issue occurred with the MFLVALUE/LNDVALUE field. Please inspect these fields.")
setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
return (Error, Parcel)
# add flag to parcels that have more than 10 parcels with mflvalue == landvalue
#def addmflLandValueFlags (Error, outFC, fieldNames):
# errorType = "tax"
# with arcpy.da.UpdateCursor(output_fc_temp, fieldNames) as cursor:
# for row in cursor:
# for parcelid in parcelidList:
# if row[3] == parcelid:
# arcpy.AddMessage("sisi")
# arcpy.AddMessage(Parcel)
# getattr(Parcel,errorType + "Errors").append("MFLVALUE should not equal LNDVALUE in most cases. Please correct this issue and refer to the submission documentation for further clarification as needed.")
# setattr(Error,errorType + "ErrorCount", getattr(Error,errorType + "ErrorCount") + 1)
# return(Error,Parcel)
# checks that parcels with auxclass x1-x4 have taxroll values = <null>
def auxclassFullyX4Check (Error,Parcel,auxclassField,propclassField,errorType):
try:
auxclass = getattr(Parcel,auxclassField)
propclass = getattr(Parcel,propclassField)
taxRollFields = {'IMPVALUE': getattr(Parcel, "impvalue"), 'CNTASSDVALUE': getattr(Parcel, "cntassdvalue"),
'LNDVALUE': getattr(Parcel, "lndvalue"), 'MFLVALUE': getattr(Parcel, "mflvalue"),
'ESTFMKVALUE': getattr(Parcel, "estfmkvalue"),
'NETPRPTA': getattr(Parcel, "netprpta"), 'GRSPRPTA': getattr(Parcel, "grsprpta")}
probFields = []
if auxclass is not None:
if auxclass == 'X4' and propclass is None:
for key, val in taxRollFields.iteritems():
if val is not None:
probFields.append(key)
if len(probFields) > 0:
getattr(Parcel,errorType + "Errors").append("A <Null> value is | |
r.Link('%(bar)s', 'blah', '%(foo)s')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestLink")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
a = os.lstat(util.joinPaths(self.workDir, 'foo'))
b = os.lstat(util.joinPaths(self.workDir, 'bar'))
c = os.lstat(util.joinPaths(self.workDir, 'blah'))
assert(a[stat.ST_INO] == b[stat.ST_INO])
assert(b[stat.ST_INO] == c[stat.ST_INO])
def testLinkDir(self):
recipe1 = """
class FooRecipe(PackageRecipe):
name = 'foo'
version = '1'
clearBuildReqs()
def setup(r):
r.MakeDirs('/var/foo', '/var/bar/')
r.Create('/var/foo/testme', contents='arbitrary data')
r.Link('/var/foo/tested', '/var/foo/testme')
"""
(built, d) = self.buildRecipe(recipe1, "FooRecipe")
self.updatePkg(built[0][0])
assert(os.lstat(self.rootDir + '/var/foo/testme').st_ino ==
os.lstat(self.rootDir + '/var/foo/tested').st_ino)
class MakeDirsTest(rephelp.RepositoryHelper):
def testMakeDirsTest1(self):
"""
Test creating directories
"""
recipestr1 = """
class TestMakeDirs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs('foo')
self.Run('ls foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestMakeDirs")
recipestr2 = """
class TestMakeDirs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.MakeDirs('/bar/blah')
self.ExcludeDirectories(exceptions='/bar/blah')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr2, "TestMakeDirs")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
assert(stat.S_ISDIR(
os.lstat(util.joinPaths(self.workDir, '/bar/blah'))[stat.ST_MODE]))
class SugidTest(rephelp.RepositoryHelper):
def testSugidTest1(self):
"""
Test to make sure that setuid/setgid bits get restored.
Warning: this won't catch variances when running as root!
"""
recipestr1 = """
class TestSugid(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create('%(essentialbindir)s/a', mode=06755)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestSugid")
self.mimicRoot()
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
self.realRoot()
a = os.lstat(util.joinPaths(self.workDir, 'bin/a'))
assert (a.st_mode & 07777 == 06755)
class CreateTest(rephelp.RepositoryHelper):
def testCreateTest1(self):
"""
Test creating files directly
"""
recipestr1 = """
class TestCreate(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(self):
self.Create(('/a', '/b'))
self.Create('/c', '/d', contents='ABCDEFGABCDEFGABCDEFGABCDEFG')
self.Create('/e', contents='%(essentialbindir)s')
self.Create('/f', contents='%(essentialbindir)s', macros=False)
self.Create('%(essentialbindir)s/{g,h}', mode=0755)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestCreate")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
a = os.lstat(util.joinPaths(self.workDir, 'a'))
b = os.lstat(util.joinPaths(self.workDir, 'b'))
F = file(util.joinPaths(self.workDir, 'c'))
c = F.read()
F.close()
F = file(util.joinPaths(self.workDir, 'd'))
d = F.read()
F.close()
F = file(util.joinPaths(self.workDir, 'e'))
e = F.read()
F.close()
F = file(util.joinPaths(self.workDir, 'f'))
f = F.read()
F.close()
g = os.lstat(util.joinPaths(self.workDir, '/bin/g'))
h = os.lstat(util.joinPaths(self.workDir, '/bin/h'))
assert (a.st_size == 0)
assert (b.st_size == 0)
assert (c == 'ABCDEFGABCDEFGABCDEFGABCDEFG\n')
assert (d == 'ABCDEFGABCDEFGABCDEFGABCDEFG\n')
assert (e == '/bin\n')
assert (f == '%(essentialbindir)s\n')
assert (g.st_mode & 0777 == 0755)
assert (h.st_mode & 0777 == 0755)
class SymlinkTest(rephelp.RepositoryHelper):
def testSymlinkTest1(self):
"""
Test creating symlinks
"""
recipestr1 = """
class TestSymlink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('/a')
r.Symlink('/one/argument')
"""
self.assertRaises(errors.CookError, self.buildRecipe,
recipestr1, "TestSymlink")
def testSymlinkTest2(self):
recipestr2 = """
class TestSymlink(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Symlink('/asdf/foo', '/bar/blah')
r.DanglingSymlinks(exceptions='.*')
"""
self.buildRecipe(recipestr2, "TestSymlink")
class DocTest(rephelp.RepositoryHelper):
def exists(self, file):
return os.path.exists(self.workDir + file)
def testDocs(self):
recipestr1 = """
class TestDocs(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('README')
r.Doc('README')
r.Create('docs/README.too')
r.Doc('docs/')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestDocs")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
docdir = '/usr/share/doc/test-0/'
for file in 'README', 'docs/README.too':
assert(self.exists(docdir + file))
class ConfigureTest(rephelp.RepositoryHelper):
def testConfigure(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
exit 0
''')
r.Configure()
r.Create('/asdf/foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
# make sure that the package doesn't mention the
# bootstrap flavor
assert(built[0][2].isEmpty())
def testConfigureSubDirMissingOK(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
touch mustNotExist
exit 0
''')
r.Configure(subDir='missing', skipMissingSubDir=True)
r.Run('test -f mustNotExist && exit 1 ; exit 0')
r.Create('/asdf/foo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
def testConfigureSubDirMissingBad(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
exit 0
''')
r.Configure(subDir='missing')
r.Create('/asdf/foo')
"""
self.assertRaises(RuntimeError, self.buildRecipe,
recipestr1, "TestConfigure")
def testConfigureLocal(self):
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh -x
echo "$CONFIG_SITE" > $1
''')
r.MakeDirs('/make', '/conf')
r.ManualConfigure('%(destdir)s/conf/target')
r.ManualConfigure('%(destdir)s/conf/local', local=True)
r.Make('%(destdir)s/make/target', makeName='./configure')
r.Make('%(destdir)s/make/local', local=True, makeName='./configure')
# run again to make sure any state changed by Make was restored.
r.ManualConfigure('%(destdir)s/conf/target')
r.ManualConfigure('%(destdir)s/conf/local', local=True)
"""
self.overrideBuildFlavor('is:x86 target: x86_64')
(built, d) = self.buildRecipe(recipestr1, "TestConfigure")
self.updatePkg('test[is:x86 target:x86_64]')
for dir in ('%s/make/', '%s/conf'):
dir = dir % self.cfg.root
self.verifyFile('%s/local' % dir,
' '.join([ '%s/%s' % (self.cfg.siteConfigPath[0], x)
for x in ('x86', 'linux')]) + '\n')
self.verifyFile('%s/target' % dir,
' '.join([ '%s/%s' % (self.cfg.siteConfigPath[0], x)
for x in ('x86_64', 'linux')]) + '\n')
def testConfigureMissingReq(self):
recipestr = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('configure', mode=0755, contents='''#!/bin/sh
echo "$0: line 2000: foo: command not found"
# exit 1
''')
r.ManualConfigure()
r.Create('/opt/foo')
"""
self.logFilter.add()
self.assertRaises(RuntimeError, self.buildRecipe,
recipestr.replace('# exit', 'exit'),
"TestConfigure", logBuild = True)
self.logFilter.remove()
self.logFilter.regexpCompare([
'error: .*',
'warning: ./configure: line 2000: foo: command not found',
'warning: Failed to find possible build requirement for path "foo"',
])
# now repeat with foo in the repository but not installed
self.addComponent('foo:runtime', '1', fileContents = [
('/usr/bin/foo', rephelp.RegularFile(contents="", perms=0755)),])
repos = self.openRepository()
self.logFilter.add()
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedSaveArgSet(args[0], None, reportedBuildReqs, *args[1:]))
(built, d) = self.buildRecipe(recipestr, "TestConfigure",
logBuild = True, repos=repos)
self.logFilter.remove()
self.logFilter.compare([
'warning: ./configure: line 2000: foo: command not found',
"warning: Some missing buildRequires ['foo:runtime']",
])
self.assertEquals(reportedBuildReqs, set(['foo:runtime']))
self.unmock()
# now test with absolute path in error message
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr.replace(
'foo: command not found', '/usr/bin/foo: command not found'),
"TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.regexpCompare([
'warning: .*: /usr/bin/foo: command not found',
r"warning: Some missing buildRequires \['foo:runtime'\]",
])
# test that the logfile got the warning message
client = self.getConaryClient()
repos = client.getRepos()
nvf = [x for x in built if x[0] == 'test:debuginfo'][0]
nvf = repos.findTrove(self.cfg.buildLabel, nvf)
fileDict = client.getFilesFromTrove(*nvf[0])
fileObj = fileDict['/usr/src/debug/buildlogs/test-0-log.bz2']
b = bz2.BZ2Decompressor()
buildLog = b.decompress(fileObj.read())
self.assertFalse( \
"warning: Suggested buildRequires additions: ['foo:runtime']" \
not in buildLog)
# finally repeat with foo installed, not just in repository
self.updatePkg('foo:runtime')
self.logFilter.add()
reportedBuildReqs = set()
self.mock(packagepolicy.reportMissingBuildRequires, 'updateArgs',
lambda *args:
mockedSaveArgSet(args[0], None, reportedBuildReqs, *args[1:]))
(built, d) = self.buildRecipe(recipestr, "TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.compare([
'warning: ./configure: line 2000: foo: command not found',
"warning: Some missing buildRequires ['foo:runtime']",
])
self.assertEquals(reportedBuildReqs, set(['foo:runtime']))
def testConfigureMissingReq2(self):
"""
test that regexp matching is not fooled by dir argument
"""
recipestr1 = """
class TestConfigure(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('configure', mode=0755, contents='''#!/bin/sh
echo "/random/path/configure: line 2000: foo: command not found"
''')
r.ManualConfigure('')
r.Create('/opt/foo')
"""
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr1, "TestConfigure",
logBuild = True)
self.logFilter.remove()
self.logFilter.compare([
'warning: /random/path/configure: line 2000: foo: '
'command not found',
'warning: Failed to find possible build requirement for path "foo"',
])
class CMakeTest(rephelp.RepositoryHelper):
def testCMake(self):
if not util.checkPath('cmake'):
raise testhelp.SkipTestException('cmake not installed')
recipestr1 = """
class TestCMake(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.addSource('CMakeLists.txt', contents = '''\
PROJECT(floo)
ADD_EXECUTABLE(floo floo.c)
''')
r.addSource('floo.c', contents = '''
int main()
{
return 0;
}
''')
r.CMake()
r.Make()
r.Copy('floo', '/usr/bin/floo')
"""
(built, d) = self.buildRecipe(recipestr1, "TestCMake")
def testCMakeSubDir(self):
if not util.checkPath('cmake'):
raise testhelp.SkipTestException('cmake not installed')
# Same as previous test, but run in a subdir
recipestr1 = """
class TestCMake(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.Create('floo/CMakeLists.txt', contents = '''\
PROJECT(floo)
''')
r.CMake(dir = 'floo')
r.Copy('floo/Makefile', '/usr/share/floo/')
"""
(built, d) = self.buildRecipe(recipestr1, "TestCMake")
class RemoveTest(rephelp.RepositoryHelper):
def testRemove(self):
recipestr1 = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('a/b')
r.Create('a/file')
r.Install('a', '/a')
r.Remove('/a/*')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestRemove")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
def testRemoveRecursive(self):
# Test for CNY-69
recipestr1 = """
class TestRemove(PackageRecipe):
name = 'testr'
version = '0.1'
clearBuildReqs()
def setup(r):
r.Create("%(datadir)s/%(name)s/dir1/file1", contents="file1")
r.Create("%(datadir)s/%(name)s/dir1/dir2/file2", contents="file2")
r.Create("%(datadir)s/%(name)s/dir1/dir2/dir3/file3", contents="file3")
r.Create("%(datadir)s/%(name)s/dir1/dir2/dir5/file4", contents="file4")
r.Remove("%(datadir)s/%(name)s/dir1/dir2", recursive=True)
"""
repos = self.openRepository()
oldVal = self.cfg.cleanAfterCook
self.cfg.cleanAfterCook = False
try:
(build, d) = self.buildRecipe(recipestr1, "TestRemove")
finally:
self.cfg.cleanAfterCook = oldVal
dr = os.path.join(self.workDir, '../build/testr/_ROOT_',
'usr/share/testr')
self.assertEqual(os.listdir(dr), ["dir1"])
def testUnmatchedRemove(self):
recipestr = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('/a')
"""
self.reset()
err = self.assertRaises(RuntimeError,
self.buildRecipe, recipestr + "r.Remove(r.glob('/a/*'))\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: Glob('/a/*')")
err = self.assertRaises(RuntimeError,
self.buildRecipe, recipestr + "r.Remove('/a/*')\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: '/a/*'")
err = self.assertRaises(RuntimeError,
self.buildRecipe,
recipestr + "r.Remove(r.glob('/a/*'), '/b/*')\n",
"TestRemove")
assert(str(err) == "Remove: No files matched: (Glob('/a/*'), '/b/*')")
def testUnmatchedRemove2(self):
recipestr = """
class TestRemove(PackageRecipe):
name = 'test'
version = '0'
clearBuildReqs()
def setup(r):
r.MakeDirs('/a')
r.Remove('/a/*', allowNoMatch = True)
"""
self.reset()
self.logFilter.add()
(built, d) = self.buildRecipe(recipestr, "TestRemove")
self.logFilter.remove()
self.assertEquals(self.logFilter.records[0],
"warning: Remove: No files matched: '/a/*'")
class BuildLabelTest(rephelp.RepositoryHelper):
def testBuildLabel(self):
recipestr1 = """
class TestBuildLabel(PackageRecipe):
name = 'test'
version = | |
# ## Target Variable for Neural Network - this is the target variable
# ## ------------------------------------------------------------------------------------------------------
# In[30]: Declare some functions
def setTarget(df, targetVariable):
y = df[targetVariable]
return y
# In[31]: Set target variable
y = setTarget(beforeafterDF, targetVariable)
logger.info(' Y target variable as a pandas.series -> numpy.array ')
logger.info('----------------------------------------------------------')
logger.info(y.head())
logger.info(f'(y describe = {y.describe()}')
logger.info('----------------------------------------------------------')
# In[33]:
if( explore ):
logger.info(f'(y values from 0 to 10 = {y.values[0:10]}')
logger.info(f'(y head = {y.head()}')
logger.info(f'(y describe = {y.describe()}')
# ## ------------------------------------------------------------------------------------------------------
# ## Normalization
# ## ------------------------------------------------------------------------------------------------------
# ### Normalize the whole X
# In[35]: Declare some functions
def normalizeX(df):
"""Return a normalized value of df.
Save MinMaxScaler normalizer for X variable"""
scaler = MinMaxScaler(feature_range=(scaler_min, scaler_max))
# scaler.fit(df)
scaler.fit(df.astype(np.float64))
# normalized = scaler.transform(df)
normalized = scaler.transform(df.astype(np.float64))
# store MinMaxScaler for X
joblib.dump(scaler, 'models/scaler_normalizeX.save')
return normalized, scaler
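# Note on normalizeX() above: MinMaxScaler rescales each column independently via
# x_scaled = scaler_min + (x - col_min) / (col_max - col_min) * (scaler_max - scaler_min),
# where scaler_min and scaler_max are assumed to be configured earlier in this script.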
# In[36]: Normalize Features and Save Normalized values, Normalize input variables set
X_normalized, X_normalized_MinMaxScaler = normalizeX(X)
logger.info('')
logger.info('--------- X_normalized done ------------')
logger.info('-------------------------- X -----------------------------')
# ## ------------------------------------------------------------------------------------------------------
# ## Load MinMaxScalerXFull
# ## ------------------------------------------------------------------------------------------------------
# In[37]: Declare some functions
def loadMinMaxScalerXFull():
X_normalized_MinMaxScaler = joblib.load('models/scaler_normalizeX.save')
return X_normalized_MinMaxScaler
# In[38]: Load Saved Normalized Data (Normalizer)
X_normalized_MinMaxScaler = loadMinMaxScalerXFull()
logger.info('')
logger.info('--------- X_normalized_MinMaxScaler load done ------------')
logger.info('-------------------------- X -----------------------------')
# In[40]:
if( explore ):
printNormalizedX(X_normalized)
X_normalized[1]
# In[42]: De-normalize Features set
X_denormalized = X_normalized_MinMaxScaler.inverse_transform(X_normalized)
# In[43]:
if( explore ):
X_denormalized[1]
X_denormalized[-1]
# ### Normalize the whole y
# In[45]: Declare some functions
def normalizeY(df):
"""Return a normalized value of df.
Save MinMaxScaler normalizer for Y variable"""
new_df = df.copy()
new_df_reshaped = new_df.values.reshape(-1,1)
scaler = MinMaxScaler(feature_range=(scaler_min, scaler_max))
scaler.fit(new_df_reshaped.astype(np.float64))
normalizedY = scaler.transform(new_df_reshaped.astype(np.float64))
normalizedY = normalizedY.flatten()
# store MinMaxScaler for Y
joblib.dump(scaler, 'models/scaler_normalizeY.save')
return normalizedY, scaler
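# Note on normalizeY() above: MinMaxScaler expects a 2D array, which is why the series is
# reshaped to (-1, 1) before fitting and flattened back to 1D afterwards.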
# In[46]: Normalize Target and Save Normalized values, Normalize target variable set
y_normalized, y_normalized_MinMaxScaler = normalizeY(y)
logger.info('')
logger.info('--------- y_normalized done ------------')
logger.info('-------------------------- y -----------------------------')
# In[48]:
if( explore ):
printNormalizedY(y_normalized)
y_normalized[0:3]
# ## ------------------------------------------------------------------------------------------------------
# ## Load MinMaxScalerYFull
# ## ------------------------------------------------------------------------------------------------------
# In[50]: Declare some functions
def loadMinMaxScalerYFull():
y_normalized_MinMaxScaler = joblib.load('models/scaler_normalizeY.save')
return y_normalized_MinMaxScaler
# In[51]: Load Saved Normalized Data (Normalizer)
y_normalized_MinMaxScaler = loadMinMaxScalerYFull()
logger.info('')
logger.info('--------- y_normalized_MinMaxScaler load done ------------')
logger.info('-------------------------- y -----------------------------')
# In[52]: De-normalize Features set
y_denormalized = y_normalized_MinMaxScaler.inverse_transform(y_normalized.reshape(y_normalized.shape[0],1))
# In[53]:
if( explore ):
y_denormalized[0:3]
y_denormalized[-3:]
logger.info('')
logger.info('')
logger.info('--------- Normalization done ------------')
logger.info('----------------------------------------------------------')
# ## ------------------------------------------------------------------------------------------------------
# ## Train Neural Network with Optimizer Class, trainMultiLayerRegressor method
# ## ------------------------------------------------------------------------------------------------------
logger.info('----------------------------------------------------------')
logger.info('----------------------- MLP start ------------------------')
logger.info('----------------------------------------------------------')
# In[55]: Declare some functions
def trainMultiLayerRegressor(X_normalized, y_normalized, activation, neuronsWhole):
# Train Neural Network
mlp = MLPRegressor(hidden_layer_sizes=neuronsWhole,
max_iter=250,
activation=activation,
solver="lbfgs",
learning_rate="constant",
learning_rate_init=0.01,
alpha=0.01,
verbose=False,
momentum=0.9,
early_stopping=False,
tol=0.00000001,
shuffle=False,
# n_iter_no_change=20, \
random_state=1234)
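# Note: with solver="lbfgs", scikit-learn ignores learning_rate, learning_rate_init,
# momentum, early_stopping and shuffle; those arguments only affect the sgd/adam solvers.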
mlp.fit(X_normalized, y_normalized)
# Save model on file system
joblib.dump(mlp, 'models/saved_mlp_model.pkl')
return mlp
# In[56]: Train Neural Network
mlp = trainMultiLayerRegressor(X_normalized, y_normalized, activation_function, neuronsWhole)
# In[57]: Declare some functions
def predictMultiLayerRegressor(mlp, X_normalized):
y_predicted = mlp.predict(X_normalized)
return y_predicted
# In[58]: Create prediction
y_predicted = predictMultiLayerRegressor(mlp, X_normalized)
# In[59]: Evaluete the model
from utils import evaluateGoodnessOfPrediction
goodness_of_fitt = evaluateGoodnessOfPrediction(y_normalized, y_predicted)
logger.info('------------- Neural Network Goodness of Fitt ------------')
logger.info('----------------------------------------------------------')
logger.info(' evaluateGoodnessOfPrediction(y_normalized, y_predicted)')
logger.info(' This dictionary is also the part of the return of Train')
logger.info(f'( goodness_of_fitt = \n {goodness_of_fitt}')
logger.info('----------------------------------------------------------')
# TODO
# return these values and store them in some variable
# ## ------------------------------------------------------------------------------------------------------
# ## Report
# ## ------------------------------------------------------------------------------------------------------
VisualizePredictedYScatter(y_normalized, y_predicted, targetVariable)
VisualizePredictedYLineWithValues(y_normalized, y_predicted, targetVariable, 'Normalized')
# ### De-normlaize
# ## ------------------------------------------------------------------------------------------------------
# ## I want to see the result in the original scale. I don't care about X, only about y_normalized and y_predicted.
# ## ------------------------------------------------------------------------------------------------------
# In[65]: De-normalize target variable and predicted target variable
y_denormalized = y_normalized_MinMaxScaler.inverse_transform(y_normalized.reshape(y_normalized.shape[0],1))
y_predicted_denormalized = y_normalized_MinMaxScaler.inverse_transform(y_predicted.reshape(y_predicted.shape[0],1))
# In[68]: Declare De-normalizer functions
def denormalizeX(X_normalized, X_normalized_MinMaxScaler):
X_denormalized = X_normalized_MinMaxScaler.inverse_transform(X_normalized)
return X_denormalized
# In[69]: De-normalize Features
X_denormalized = denormalizeX(X_normalized, X_normalized_MinMaxScaler)
# In[74]: Declare De-normalizer functions
def denormalizeY(y_normalized, y_normalized_MinMaxScaler):
y_denormalized = y_normalized_MinMaxScaler.inverse_transform(y_normalized.reshape(y_normalized.shape[0],1))
return y_denormalized
# In[75]: De-normalize Target
y_denormalized = denormalizeY(y_normalized, y_normalized_MinMaxScaler)
y_predicted_denormalized = denormalizeY(y_predicted, y_normalized_MinMaxScaler)
# ## ------------------------------------------------------------------------------------------------------
# ## Report
# ## ------------------------------------------------------------------------------------------------------
VisualizePredictedYLineWithValues(y_denormalized, y_predicted_denormalized, targetVariable, 'Denormalized')
# ## ------------------------------------------------------------------------------------------------------
# ## Linear Regression Learn
# ## ------------------------------------------------------------------------------------------------------
logger.info('----------------------------------------------------------')
logger.info('-------------- Linear Regression start -------------------')
logger.info('----------------------------------------------------------')
# In[125]: Declare some functions
# TODO:
# Somehow carry this through so that, based on the input variables, it handles
# how many variables there are in the dataframe.
# As far as I can see, the trainer does not appear anywhere in the advice; the reason is
# that the Advice has no need for the lags or the leads.
# Once a model has been trained, the Advice only deals with loading the model,
# and the lags and leads do not appear in it at all.
# When the pandas.DataFrame (preProcessedDF) was constructed, the order of the variables
# was determined by the program. The target variable is the last column.
# Since we do not care about the previous or following target variable,
# the program does not have to count up to the last column.
# As soon as we get the pandas.DataFrame, the program knows the number of columns.
# So 'index' is a temporary variable that contains the number of columns - 1.
# In other words, the lag will be computed for every column except the last one.
logger.info('----------------------------------------------------------')
logger.info('-------------- Create Before After Diagnosis -------------')
logger.info('----------------------------------------------------------')
logger.info(' preProcessedDF the input of createBeforeafterDFLags()')
logger.info('')
logger.info(f' preProcessedDF.shape = {preProcessedDF.shape}')
logger.info(f' preProcessedDF.columns = {preProcessedDF.columns}')
# ## ------------------------------------------------------------------------------------------------------
# ## Linear Regression Calculate N'th previous values
# ## ------------------------------------------------------------------------------------------------------
def createBeforeafterDFLags(df, lag):
beforeafterDFLags = df.copy()
dfColumnsNumber = beforeafterDFLags.shape[1]
logger.info(f' createBeforeafterDFLags(df, lag) df col number = {dfColumnsNumber}')
# index = 10
index = dfColumnsNumber - 1
logger.info(f' createBeforeafterDFLags(df, lag) df col number -1 = {index} \n')
inputVariables = np.flip(beforeafterDFLags.columns[0:index].ravel(), axis=-1)
logger.info(f' Input Variables in createBeforeafterDFLags = {inputVariables} \n')
for i in inputVariables:
new_column = beforeafterDFLags[i].shift(lag)
new_column_name = (str('prev') + str(1) + i)
beforeafterDFLags.insert(loc=index, column=new_column_name, value=new_column)
beforeafterDFLags = beforeafterDFLags[lag:] # remove first row as we haven't got data in lag var
return beforeafterDFLags, index # return not just the df but an int as well
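# Illustration for createBeforeafterDFLags above (hypothetical rows): with lag=1, prev1CPU at
# row t holds the CPU value from row t-1, and the first `lag` rows are dropped because they
# have no previous observation to shift in.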
# In[126]: Create lag variables (see above -> 'prev1CPU', 'prev1Inter', etc)
beforeafterDFLags, index1 = createBeforeafterDFLags(preProcessedDF, 1)
logger.info('----------------------------------------------------------')
logger.info('-------------- Create Before After Diagnosis -------------')
logger.info('----------------------------------------------------------')
logger.info(' after createBeforeafterDFLags(preProcessedDF, 1)')
logger.info(' beforeafterDFLags, index1 = createBeforeafterDFLags(preProcessedDF, 1)')
logger.info(f' beforeafterDFLags.shape = {beforeafterDFLags.shape} \n')
logger.info(f' beforeafterDFLags.columns = {beforeafterDFLags.columns}')
logger.info('---------------------------------------------------------- \n')
logger.debug(f"\n {beforeafterDFLags[['prev1CPU', 'CPU']].head(10)}")
logger.debug('----------------------------------------------------------')
logger.debug(f"\n {beforeafterDFLags[['WorkerCount', 'prev1WorkerCount']].head(10)}")
logger.debug('----------------------------------------------------------')
logger.debug(f"\n {beforeafterDFLags[['WorkerCount', 'prev1WorkerCount']].tail(10)}")
# ## ------------------------------------------------------------------------------------------------------
# ## Linear Regression Calculate N'th next values
# ## ------------------------------------------------------------------------------------------------------
# Here, however, there is a problem: at this point the function already has to know
# the actual number of variables.
def createBeforeafterDFLeads(df, index, lead = 1):
beforeafterDFLeads = df.copy()
inputVariables = np.flip(beforeafterDFLeads.columns[0:index].ravel(), axis=-1)
logger.info(f'Input Variables in createBeforeafterDFLeads: {inputVariables} \n')
# In the case of the WorkerCount column we take the next value into account.
# In every other case we use the lead parameter given by the user.
for i in inputVariables:
if( i == 'WorkerCount'):
lead_value = 1
else:
lead_value = lead
new_column = beforeafterDFLeads[i].shift(-lead_value)
new_column_name = (str('next') + str(1) + i)
beforeafterDFLeads.insert(loc=index, column=new_column_name, value=new_column)
beforeafterDFLeads = beforeafterDFLeads[:-lead] # remove last row as we haven't got data in lead (next) variables
beforeafterDFLeads = beforeafterDFLeads.iloc[:,:-1] # remove last column - Latency
return beforeafterDFLeads
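# Illustration for createBeforeafterDFLeads above (hypothetical rows): with lead=2, next1CPU at
# row t holds the CPU value from row t+2, while next1WorkerCount always uses a lead of 1;
# the last `lead` rows and the final Latency column are dropped because their shifted values
# would be undefined.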
# In[129]: Create lead variables (see above -> 'next1CPU', 'next1Inter', etc)
beforeafterDF = createBeforeafterDFLeads(beforeafterDFLags, index1, lead = lead)
logger.info('----------------------------------------------------------')
logger.info('-------------- Create Before After Diagnosis -------------')
logger.info('----------------------------------------------------------')
logger.info(' after createBeforeafterDFLeads(beforeafterDFLags, index1, lead = lead)')
logger.info(' beforeafterDF = createBeforeafterDFLeads(beforeafterDFLags, index1, lead = lead)')
logger.info(f' beforeafterDF.shape = {beforeafterDF.shape} \n')
logger.info(f' beforeafterDF.columns = {beforeafterDF.columns}')
logger.info('---------------------------------------------------------- \n')
logger.debug(f"\n {beforeafterDF[['prev1CPU', 'CPU', 'next1CPU']].head(10)}")
logger.debug('----------------------------------------------------------')
logger.debug(f"\n {beforeafterDF[['WorkerCount', 'prev1WorkerCount', 'next1WorkerCount']].head(10)}")
logger.debug('----------------------------------------------------------')
logger.debug(f"\n {beforeafterDF[['WorkerCount', 'prev1WorkerCount', 'next1WorkerCount']].tail(10)}")
# In[131]: Assert
logger.debug('----------------------------------------------------------')
logger.debug('---------- Assert --------------')
logger.debug('----------------------------------------------------------')
logger.debug(f'---------------- original _input_metrics length | |
# -*- coding: utf-8 -*-
# NOTE: TRADEKING IS NOW ALLY. PROCEED AS NORMAL, AS THE CHANGES HAVE BEEN INCORPORATED IN PLACE.
from __future__ import unicode_literals
from datetime import datetime
from datetime import timedelta
from holidays import UnitedStates
from lxml.etree import Element
from lxml.etree import SubElement
from lxml.etree import tostring
from oauth2 import Consumer
from oauth2 import Client
from oauth2 import Token
from os import getenv
from os import path
from pytz import timezone
from pytz import utc
from simplejson import loads
from threading import Timer
from logs import Logs
# Read the authentication keys for TradeKing from environment variables.
TRADEKING_CONSUMER_KEY = getenv("TRADEKING_CONSUMER_KEY")
TRADEKING_CONSUMER_SECRET = getenv("TRADEKING_CONSUMER_SECRET")
TRADEKING_ACCESS_TOKEN = getenv("TRADEKING_ACCESS_TOKEN")
TRADEKING_ACCESS_TOKEN_SECRET = getenv("TRADEKING_ACCESS_TOKEN_SECRET")
# Read the TradeKing account number from the environment variable.
TRADEKING_ACCOUNT_NUMBER = getenv("TRADEKING_ACCOUNT_NUMBER")
# Only allow actual trades when the environment variable confirms it.
USE_REAL_MONEY = getenv("USE_REAL_MONEY") == "YES"
# The base URL for API requests to TradeKing.
TRADEKING_API_URL = "https://api.tradeking.com/v1/%s.json"
# The XML namespace for FIXML requests.
FIXML_NAMESPACE = "http://www.fixprotocol.org/FIXML-5-0-SP2"
# The HTTP headers for FIXML requests.
FIXML_HEADERS = {"Content-Type": "text/xml"}
# The amount of cash in dollars to hold from being spent.
#CASH_HOLD = 1000
CASH_HOLD = 1
# The fraction of the stock price at which to set order limits.
LIMIT_FRACTION = 0.1
# The delay in seconds for the second leg of a trade.
ORDER_DELAY_S = 30 * 60
# Blacklisted stock ticker symbols, e.g. to avoid insider trading or stocks above 100 USD (small account).
TICKER_BLACKLIST = []
# We're using NYSE and NASDAQ, which are both in the eastern timezone.
MARKET_TIMEZONE = timezone("US/Eastern")
# The filename pattern for historical market data.
MARKET_DATA_FILE = "market_data/%s_%s.txt"
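# e.g. MARKET_DATA_FILE % ("AAPL", "20170103") -> "market_data/AAPL_20170103.txt"
# (hypothetical ticker and date, matching the %Y%m%d format used in get_day_quotes below).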
class Trading:
"""A helper for making stock trades."""
def __init__(self, logs_to_cloud):
self.logs = Logs(name="trading", to_cloud=logs_to_cloud)
def make_trades(self, companies):
"""Executes trades for the specified companies based on sentiment."""
# Determine whether the markets are open.
market_status = self.get_market_status()
if not market_status:
self.logs.error("Not trading without market status.")
return False
# Filter for any strategies resulting in trades.
actionable_strategies = []
market_status = self.get_market_status()
for company in companies:
strategy = self.get_strategy(company, market_status)
if strategy["action"] != "hold":
actionable_strategies.append(strategy)
else:
self.logs.warn("Dropping strategy: %s" % strategy)
if not actionable_strategies:
self.logs.warn("No actionable strategies for trading.")
return False
# Calculate the budget per strategy.
balance = self.get_balance()
budget = self.get_budget(balance, len(actionable_strategies))
if not budget:
self.logs.warn("No budget for trading: %s %s %s" %
(budget, balance, actionable_strategies))
return False
self.logs.debug("Using budget: %s x $%s" %
(len(actionable_strategies), budget))
# Handle trades for each strategy.
success = True
for strategy in actionable_strategies:
ticker = strategy["ticker"]
action = strategy["action"]
# Execute the strategy.
if action == "bull":
self.logs.info("Bull: %s %s" % (ticker, budget))
success = success and self.bull(ticker, budget)
elif action == "bear":
self.logs.info("Bear: %s %s" % (ticker, budget))
success = success and self.bear(ticker, budget)
else:
self.logs.error("Unknown strategy: %s" % strategy)
return success
def get_strategy(self, company, market_status):
"""Determines the strategy for trading a company based on sentiment and
market status.
"""
ticker = company["ticker"]
sentiment = company["sentiment"]
strategy = {}
strategy["name"] = company["name"]
if "root" in company:
strategy["root"] = company["root"]
strategy["sentiment"] = company["sentiment"]
strategy["ticker"] = ticker
strategy["exchange"] = company["exchange"]
# Don't do anything with blacklisted stocks.
if ticker in TICKER_BLACKLIST:
strategy["action"] = "hold"
strategy["reason"] = "blacklist"
return strategy
# TODO: Figure out some strategy for the markets closed case.
# Don't trade unless the markets are open or are about to open.
if market_status != "open" and market_status != "pre":
strategy["action"] = "hold"
strategy["reason"] = "market closed"
return strategy
# Can't trade without sentiment.
if sentiment == 0:
strategy["action"] = "hold"
strategy["reason"] = "neutral sentiment"
return strategy
# Determine bull or bear based on sentiment direction.
if sentiment > 0:
strategy["action"] = "bull"
strategy["reason"] = "positive sentiment"
return strategy
else: # sentiment < 0
strategy["action"] = "bear"
strategy["reason"] = "negative sentiment"
return strategy
def get_budget(self, balance, num_strategies):
"""Calculates the budget per company based on the available balance."""
if num_strategies <= 0:
self.logs.warn("No budget without strategies.")
return 0.0
return round(max(0.0, balance - CASH_HOLD) / num_strategies, 2)
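# Hypothetical example for get_budget above: a $1000 balance with CASH_HOLD = 1 and
# 3 actionable strategies yields round(999.0 / 3, 2) == 333.0 per strategy;
# a non-positive strategy count returns 0.0.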
def get_market_status(self):
"""Finds out whether the markets are open right now."""
clock_url = TRADEKING_API_URL % "market/clock"
response = self.make_request(url=clock_url)
if not response:
self.logs.error("No clock response.")
return None
try:
clock_response = response["response"]
current = clock_response["status"]["current"]
except KeyError:
self.logs.error("Malformed clock response: %s" % response)
return None
if current not in ["pre", "open", "after", "close"]:
self.logs.error("Unknown market status: %s" % current)
return None
self.logs.debug("Current market status: %s" % current)
return current
def get_historical_prices(self, ticker, timestamp):
"""Finds the last price at or before a timestamp and at EOD."""
# Start with today's quotes.
quotes = self.get_day_quotes(ticker, timestamp)
if not quotes:
self.logs.warn("No quotes for day: %s" % timestamp)
return None
# Depending on where we land relative to the trading day, pick the
# right quote and EOD quote.
first_quote = quotes[0]
first_quote_time = first_quote["time"]
last_quote = quotes[-1]
last_quote_time = last_quote["time"]
if timestamp < first_quote_time:
self.logs.debug("Using previous quote.")
previous_day = self.get_previous_day(timestamp)
previous_quotes = self.get_day_quotes(ticker, previous_day)
if not previous_quotes:
self.logs.error("No quotes for previous day: %s" %
previous_day)
return None
quote_at = previous_quotes[-1]
quote_eod = last_quote
elif timestamp >= first_quote_time and timestamp <= last_quote_time:
self.logs.debug("Using closest quote.")
# Walk through the quotes until we step over the timestamp.
previous_quote = first_quote
for quote in quotes:
quote_time = quote["time"]
if quote_time > timestamp:
break
previous_quote = quote
quote_at = previous_quote
quote_eod = last_quote
else: # timestamp > last_quote_time
self.logs.debug("Using last quote.")
quote_at = last_quote
next_day = self.get_next_day(timestamp)
next_quotes = self.get_day_quotes(ticker, next_day)
if not next_quotes:
self.logs.error("No quotes for next day: %s" % next_day)
return None
quote_eod = next_quotes[-1]
self.logs.debug("Using quotes: %s %s" % (quote_at, quote_eod))
return {"at": quote_at["price"], "eod": quote_eod["price"]}
def get_day_quotes(self, ticker, timestamp):
"""Collects all quotes from the day of the market timestamp."""
# The timestamp is expected in market time.
day = timestamp.strftime("%Y%m%d")
filename = MARKET_DATA_FILE % (ticker, day)
if not path.isfile(filename):
self.logs.error("Day quotes not on file for: %s %s" %
(ticker, timestamp))
return None
quotes_file = open(filename, "r")
try:
lines = quotes_file.readlines()
quotes = []
# Skip the header line, then read the quotes.
for line in lines[1:]:
columns = line.split(",")
market_time_str = columns[1]
try:
market_time = MARKET_TIMEZONE.localize(datetime.strptime(
market_time_str, "%Y%m%d%H%M"))
except ValueError:
self.logs.error("Failed to decode market time: %s" %
market_time_str)
return None
price_str = columns[2]
try:
price = float(price_str)
except ValueError:
self.logs.error("Failed to decode price: %s" % price_str)
return None
quote = {"time": market_time, "price": price}
quotes.append(quote)
return quotes
except IOError as exception:
self.logs.error("Failed to read quotes cache file: %s" % exception)
return None
finally:
quotes_file.close()
def is_trading_day(self, timestamp):
"""Tests whether markets are open on a given day."""
# Markets are closed on holidays.
if timestamp in UnitedStates():
self.logs.debug("Identified holiday: %s" % timestamp)
return False
# Markets are closed on weekends.
if timestamp.weekday() in [5, 6]:
self.logs.debug("Identified weekend: %s" % timestamp)
return False
# Otherwise markets are open.
return True
def get_previous_day(self, timestamp):
"""Finds the previous trading day."""
previous_day = timestamp - timedelta(days=1)
# Walk backwards until we hit a trading day.
while not self.is_trading_day(previous_day):
previous_day -= timedelta(days=1)
self.logs.debug("Previous trading day for %s: %s" %
(timestamp, previous_day))
return previous_day
def get_next_day(self, timestamp):
"""Finds the next trading day."""
next_day = timestamp + timedelta(days=1)
# Walk forward until we hit a trading day.
while not self.is_trading_day(next_day):
next_day += timedelta(days=1)
self.logs.debug("Next trading day for %s: %s" %
(timestamp, next_day))
return next_day
def utc_to_market_time(self, timestamp):
"""Converts a UTC timestamp to local market time."""
utc_time = utc.localize(timestamp)
market_time = utc_time.astimezone(MARKET_TIMEZONE)
return market_time
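# Hypothetical example for utc_to_market_time above: 2017-01-03 14:30 UTC converts to
# 09:30 US/Eastern (EST, UTC-5), i.e. the regular market open on that trading day.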
def market_time_to_utc(self, timestamp):
"""Converts a timestamp in local market time to UTC."""
market_time = MARKET_TIMEZONE.localize(timestamp)
utc_time = market_time.astimezone(utc)
return utc_time
def as_market_time(self, year, month, day, hour=0, minute=0, second=0):
"""Creates a timestamp in market time."""
market_time = datetime(year, month, day, hour, minute, second)
return MARKET_TIMEZONE.localize(market_time)
def make_request(self, url, method="GET", body="", headers=None):
"""Makes a request to the TradeKing API."""
consumer = Consumer(key=TRADEKING_CONSUMER_KEY,
secret=TRADEKING_CONSUMER_SECRET)
token = Token(key=TRADEKING_ACCESS_TOKEN,
secret=TRADEKING_ACCESS_TOKEN_SECRET)
client = Client(consumer, token)
self.logs.debug("TradeKing request: %s %s %s %s" %
(url, method, body, headers))
response, content = client.request(url, method=method, body=body,
headers=headers)
self.logs.debug("TradeKing response: %s %s" % (response, content))
try:
return loads(content)
except ValueError:
self.logs.error("Failed to decode JSON response: %s" % content)
return None
def fixml_buy_now(self, ticker, quantity, limit):
"""Generates the FIXML for a buy order."""
fixml = Element("FIXML")
fixml.set("xmlns", FIXML_NAMESPACE)
| |
'CaPjVaQrpyN138TarQ7CYBqBOz0ZF7.png',
u'Sport 1 Cz': logobase + 'kCLlfkFz3Ba3BL9Jc9ZPgUKXh2piyv.png',
u'Sport 1 LT': logobase + 'nRFc87aOV1vRnjEqmQZUneZe4HiCqn.png',
u'Sport 1 Select': logobase + '899pteevSriMFxe4omDA4G6l9i0czY.png',
u'Sport 1 Voetbal HD': logobase + '0WEqpl3cqObcLs2J0l5DDhVPZalvXx.png',
u'Sport 2 Cz': logobase + 'YLmEjnczWQGJcZC0SxRcH4ifPcwYlx.png',
u'Sport Klub 1 HD': logobase + 'cLQ3uuWhQqxCQk5RUDwA9x7bLUHBwn.png',
u'Sport Klub 2 HD': logobase + 'LiIua5Nyy8xdHFYhwgrwbcajbKz6fH.png',
u'Sport Klub 3 HD': logobase + 'UkUGpf3hamDPGPtkqximS96rrts4jx.png',
u'Sport TV 1': logobase + '7u5sbYjzJdQopdQ6bAH7GLDUsPWnXc.png',
u'Sport TV 2': logobase + 'u6T8L5PPYKHCbBATjdzjLpTC8zzCdV.png',
u'Sport TV 3': logobase + 'dYTM6Oqhaqw18FI6uYPS5yhjCmc1nZ.png',
u'Sport TV 4': logobase + 'YfFhr0OCmbN8vHUuGCLp488dxGpKVw.png',
u'Sport TV 5': logobase + 'YyzfJTMsBcmTKptLzxZcAcLKFj52LT.png',
u'Super Tennis HD': logobase + 'mjQW91VJdjIEhADvOO2s6OiKNeUdUK.png',
u'TGirls TV': logobase + 'FufZ2heFswzvAbRkTQZs8UJBYGsxuG.png',
u'TLC HD': logobase + 'gT4olUY9nFJbGRCdwd7hHJp1NJ5eJr.png',
u'TLC': logobase + 'gT4olUY9nFJbGRCdwd7hHJp1NJ5eJr.png',
u'TV 1000 Action East': logobase + 'GblbxkDGXZyW5oWt9W8wuERQAiZ7ZT.png',
u'TV 1000 Русское кино': logobase + 'ch5DX6f8hxDnmyzrjotUoKHNGzcw9P.png',
u'TV 1000': logobase + 'WJMEvVafVakrm7BUMy1lzku7VQCx25.png',
u'TV Bakhoristan': logobase + 'LoXaN929SQC5r5aQ3JETDXwG6VlPMk.png',
u'TV Plus BG': logobase + 'cxxTbCRSZsh4l1CNpZ4mYychepxUGw.png',
u'TV Safina': logobase + 'mJUmNhJbQqcr2NPppAryEJqDPBJGV0.png',
u'TV Sale': logobase + 'hs0YdiUTlpRtb3wTiP4cXboX0H9oTN.png',
u'TV Smichov': logobase + 'hqgkCNoqMXiAgNU6uedqUNIR7Z0ox5.png',
u'TV XXI (TV21)': logobase + 'TKchoTWZFRMmGDBok08zoEFJ8mJJCe.png',
u'TV1000 Comedy HD': logobase + 'ygGiR2hkQLySH6khdo8GV9CyMJ8dXi.png',
u'TV1000 Megahit HD': logobase + 'lVPY7WCjn1WM6NL6tfLFy8iGA4yk3Z.png',
u'TV1000 Premium HD': logobase + 'raoDrpin8VKmi522LZWzSF0fLRO04m.png',
u'TV2 Sport HD': logobase + 'iL3TM972YPxOxajyfbuNcKGPFrVvTg.png',
u'TV4 Sport HD': logobase + 'm8tNJfJGN7cYZtUWBggz3PMVB28clK.png',
u'TV5 Monde Europe': logobase + 'ko7rbRBnyK1iINkLOA2adRvgVOEgUK.png',
u'TV6 LT': logobase + 'SKskx67yBUvbTdMIIZjH1Z4EcB8nYX.png',
u'TVT 1': logobase + 'CKKdhDfmno9O52tMfWptiAQT0IBWV8.png',
u'Teledeporte': logobase + '57r0Kq1rFB6vcMeldfWDvp438Jz5qT.png',
u'Teletravel HD': logobase + '4ZlASq3oDpOjXfhwluOzY74sy9elaE.png',
u'TiJi': logobase + 'mD3GW0E7rdPwc4stjk7xrLI2gZn4Hq.png',
u'Tonis': logobase + '38BRA5jO6LAsQ6rv1NC3FMJ6KALp8z.png',
u'Top Shop': logobase + 'uD257Lhw7Ko2YD1reC0nRqW7lpy93D.png',
u'Topsong TV': logobase + 'DsJRpcbI6rgjONbQftC5nHt1XMAXYQ.png',
u'Torrent TV - Android': logobase + 'wf43FCQBGnvSrknDmSJXTOtbVWgOiP.png',
u'Travel + adventure HD': logobase + 'b1HifWKMyefmDDvaDAJTwNNTaD8LF4.png',
u'Travel + adventure': logobase + 'b1HifWKMyefmDDvaDAJTwNNTaD8LF4.png',
u'Travel Channel HD': logobase + 'zfnAGLCvIu1fx9hfrITAZMoo9HYww4.png',
u'UBR': logobase + 'F6EzmjkOBVB0gmn1kQX6itv5VvFml5.png',
u'Ukraine Today': logobase + '3AVq6O577A7uw9uZ7fxIvpvE3CxdtW.png',
u'VH1 Classic': logobase + 'FhxUFQ2Bsfom4vb8Ce41gFObAbh1Vh.png',
u'VH1': logobase + '58.png',
u'VIVA DE': logobase + 'HagNMshKtJ7zKnk9fdmBLhITjoWdrJ.png',
u'Venus': logobase + 'R2ug0cuB3SmBBA6LK1uoNbEV66u39v.png',
u'Viasat Explore': logobase + 'uCqpsdKP0ialUUYxUk2fXshYdYfxzW.png',
u'Viasat Fotboll': logobase + '0JLqj3qwFoT1Y61scCyUdWioV5U6hx.png',
u'Viasat History': logobase + 'MWGbB8wJp5Gm4vbPHl0ktohDDjMKdr.png',
u'Viasat Hockey': logobase + 'CuAbCRGdf3Z1FGFiwErTbHZ3lAMJzr.png',
u'Viasat Motor': logobase + 'RuYtGxEpqJ5DG7WxGCMWNDXosRdh59.png',
u'Viasat Nature East': logobase + 'yimDcPvajJcUKQm9bY15cDdp3rJFcp.png',
u'Viasat Nature-History HD': logobase + 'pSP6zxmuO4PU6xa6KRlZ9L8vvVM2Dy.png',
u'Viasat Sport Baltic': logobase + 'ZIITckvF1w5u1MlubmhoG45HxPgcZZ.png',
u'Viasat Sport HD': logobase + 'prAZKkny3W1HGM03lP0EhzcMmTPZdi.png',
u'Viasat Sport Sverige': logobase + 'prAZKkny3W1HGM03lP0EhzcMmTPZdi.png',
u'Viasat Sport': logobase + 'prAZKkny3W1HGM03lP0EhzcMmTPZdi.png',
u'Viasat TV3 Sport 1': logobase + 'LUsZ9yjy6izQJHd2z2Hf7uBZ4UyUcM.png',
u'Vip TV HD': logobase + 'VXNvw8nbJhjRncTmxkuglf8htUxN2N.png',
u'WedTV (Свадебный)': logobase + 'u93ysJkZEp1NzeG7jTbVgB7nKhDTqH.png',
u'World Fashion': logobase + '2YI4sT9YkGezrw9vZPn0uIRhZ7E2BV.png',
u'WowGirls': logobase + 'phiImbBi8hRs3LqmOOpLVsPqQkD2Hc.png',
u'XXL': logobase + '6nJtj85PlL0MxB8RDkM3toyGND3Anc.png',
u'ZDF': logobase + '5SH5FeZiITw27CPxscjksZp272u7He.png',
u'Zee TV': logobase + '1HooaeEhMSvpKmWv6nneZxnTmG5r6Q.png',
u'Zoom': logobase + 'SyisYhg411o7z9kXci4vfpLq4KBZZ4.png',
u'bTV BG': logobase + 'xiNqovHjloSoVzrVieKo6saLQTUnJ7.png',
u'nSport + HD': logobase + 'JSpj8Lq758dRJzBaTEjM8nbSfnLf9M.png',
u'АРМ ТВ': logobase + 'OgrdBlfYISfcpr0XO0ImEyelCMjUVx.png',
u'Авто плюс': logobase + 'WkRxjy6fJEBJ5NZiaGn2j05eqfFfQq.png',
u'Беларусь 24': logobase + 'GxA1KJP5YwpWc38BoPEmLwQH6uDeEz.png',
u'Беларусь 5': logobase + 'aMU4HXJN11Bo9WissbPW4rhe06vAql.png',
u'Белсат ТВ': logobase + '9VYuUQxx1ss7ieu2upENtlibyamBP0.png',
u'Бигуди': logobase + 'JvcMdB5e6KVBpbXT12ulzmDqenheRx.png',
u'Бобер': logobase + '2Edln8vEbg7UUSVUo7lIJPR780OWAR.png',
u'Боец': logobase + 'pmkJgRqsuZDzuN4c6v6jZaBVKCN3K3.png',
u'Бойцовский клуб': logobase + 'oo4RN3hUUjuVbtegW8Q5QE0bT6GwwD.png',
u'ВТВ': logobase + 'svsUD6TinXyv3B1q5sZf3fI9ebmpaF.png',
u'Вместе РФ': logobase + 'qa50GYekwBWym7KtoJdzrWHWqN8TeU.png',
u'Вопросы и ответы': logobase + 'xbV8M35FkvpieQ3TUEL8fhwU8MzjmQ.png',
u'Время': logobase + 'F44yKDJQLsX0llpZ2wupg8V5vHx5fF.png',
u'Громадське ТБ HD': logobase + 'Ovkd9TiVv3nLcKPwQS2wkJ85KyYCMQ.png',
u'Детский мир': logobase + '00Vf3rPABNnbNQ6Rv0dnfcg3JsJelA.png',
u'Детский': logobase + 'jk8kody2p38CKdj5KGXWMwRLjgFIlG.png',
u'Джус ТВ': logobase + 'qVNFoyUAOJSDvN9tHhf9j2AP7x4VkV.png',
u'Дождь HD': logobase + '381.png',
u'Дождь': logobase + '381.png',
u'Дом кино International': logobase + '69MqZE2YHJNewQkqRbJea33WuRkKgo.png',
u'Дом кино': logobase + 'jlC78Fy13KWjQUN6l3FtbsRLZDvc0x.png',
u'Домашний (+4)': logobase + 'LRMaRyPCroUq4dVcRhwKJKVuhvdvUZ.png',
u'Домашний': logobase + 'qmqrH2E2EX11qitbIvq0CYsxQjsHGm.png',
u'Драйв ТВ': logobase + 'pmmgMKcRbxeYkjVUVr4IAWM0UuZHO4.png',
u'Еврокино': logobase + '34mszCG0j0Vf6kFcMrLPnFEA8UPdu6.png',
u'Еда HD': logobase + 'ojUD1jhpv7HBOLmubpEBOsANkpYNtk.png',
u'Еда ТВ': logobase + 'TWdAdMXfMSylb2mQ4efFnOAYosymNC.png',
u'Еспресо ТВ': logobase + 'lOwm890F5URuR5Ej7IacerzECPIDt4.png',
u'<NAME>': logobase + 'xgKSMwqBdEyXnbVgb8LtNXSMiaPcOx.png',
u'Живи': logobase + 'cOluSjslxxs3JZtSVO8c15xh7h8SDU.png',
u'Загородная жизнь': logobase + 'cGGo8HRkVhy66UXKXZ4tH5HyUaaxJA.png',
u'Звезда (+4)': logobase + '01VqCLfVy5OsMBN1qXjxOTp5NKT4QS.png',
u'Звезда': logobase + '0HLRrFHt2QIkbJpLc1fy0RVe7hqCEC.png',
u'Здоровое ТВ': logobase + '2LgJcyMnjJpMAhUqX3rdQ4ChOmbuTo.png',
u'Зоо ТВ': logobase + 'RtAhntWPlKQs6CIYAb72piNF9EsN3E.png',
u'Зоопарк': logobase + '1Ugpb5T1THFcFpn19Mnua21KxHkjct.png',
u'Иллюзион+': logobase + 'XOO3bLrAAvCj45nIsxsGCppY14bY1n.png',
u'Индия': logobase + 'XVWyHt5bFFcZNzmysBSjuVdGBGl45D.png',
u'Интер': logobase + '3SP67FapzyZqMVZTPiJIcN09KRkTeu.png',
u'Интер+': logobase + 'QEdaDBbqr13CCfwKQAP77UZYPQIPn0.png',
u'Искушение': logobase + 'p3WsIen84SZK76zTMWnslNgUjsqsMZ.png',
u'История': logobase + 'PNRaeOUFzOPFtrclFBBRTckj6Lvo0u.png',
u'К1': logobase + 'mk2mYb28HFIxkFIiMNQWmKUdn1Y8hD.png',
u'К2': logobase + 'IjG76jf8k8HTNLooNpUiEXtkPfA2rG.png',
u'КХЛ HD': logobase + 'kRN7BwVtcdaXrU4Mdg24qhFAxjx9oZ.png',
u'КХЛ ТВ': logobase + '216.png',
u'Карусель': logobase + 'S233D4b6eq7KOXfdyi4dY2GokKeltg.png',
u'<NAME>': logobase + 'C1AZimW2NnNA17H1uJLxxePUMTPQZ7.png',
u'Кино ТВ': logobase + 'KkITMDICqC1erWdSqyOqoccqde2wHC.png',
u'Кино премиум HD': logobase + 'p580CRZ8bBS6dw3plMWhhxXSzQ59uS.png',
u'Кинопоказ': logobase + 'v0JEbxExcFI8dVEzCkpZUoktgiS9t7.png',
u'Кинорейс 1': logobase + 'q3N266MTLCzzNVXy3q330VIVgTp93L.png',
u'Кинорейс 2': logobase + 'RO9ac4e18hSAsPhquZ9JzyTHo5oqMK.png',
u'Комедия ТВ': logobase + 'L2MEpT2YePoDvmKRjYy6yyt5ssH1m4.png',
u'Красная линия': logobase + 'I43S6jd5noclar0LlPJnyY8adonmUV.png',
u'Кто есть кто': logobase + 'MwNkO3fXd6KefRdiGlOdOQ5q0Zu7kS.png',
u'Кубань 24 Орбита': logobase + 'FauvJxsKmI5a1fR62uSH9hJfHs5TCr.png',
u'Кубань 24': logobase + 'CAAqiN96tQzFDdtz3vjrrgeIjAKqNq.png',
u'Культура Украина': logobase + 'pyKdve4YhoChQFGSha8J0FBWBf302a.png',
u'Кухня ТВ': logobase + 'G0WbVMphlP9oJ6KvHRfx0xDfhrF9Re.png',
u'Ля-минор': logobase + '8FJA3xMMHcrZuGifHViyVQLjVIem5u.png',
u'М1': logobase + 'ezvu2ugYMGnZ968LlnjPw7VjqWIPeM.png',
u'М2': logobase + 'U4s78hznNz7mFYZQICkxN7J0HTtlCP.png',
u'Малятко ТВ': logobase + 'kjYF9vS2IDTMehpzC7WWfjnZ4NVpuk.png',
u'Мама': logobase + 'nw9fROQIjjKSDp8Wjkjl1Wt0n0xHxd.png',
u'Марс ТВ': logobase + 'KnDO2ZAW1Xlahhp1ysdlDUPCQI3Jix.png',
u'Матч ТВ HD': logobase + 'MXyy9Uud7oDuH8JqVisjsD0csgAHnQ.png',
u'Матч ТВ': logobase + 'hQDOuQjUVczvUU2ocLE0tkC1siCqpo.png',
u'Мега': logobase + 'IXY7dRFoq0qCqn4UbY47iP36vVZ6ck.png',
u'Мир (+3)': logobase + 'QxOYkz6f80IdhmC4RSHI1cMd32CqYZ.png',
u'Мир 24': logobase + 'auv6717gJOWi0A2VoeDQaCsx9G1NOj.png',
u'Мир HD': logobase + 'Oq6h2IicTagHENQu1mFkjLk5rChMnr.png',
u'Мир': logobase + 'Oq6h2IicTagHENQu1mFkjLk5rChMnr.png',
u'Многосерийное ТВ': logobase + '4TMYdVpZYXafyIumuB5d7PrjFnslyT.png',
u'Москва 24': logobase + 'dZcmoqRoZLhCBh8BE4RnbQivuDY6hH.png',
u'Москва Доверие': logobase + '9oPazhJQrGZcSN64ZOS3WjLwGmQIZy.png',
u'<NAME>': logobase + 'Qa41eifERrD77xQsmpRGbeTq95Ldlv.png',
u'Мужское кино': logobase + 'PUDb8m2JFLndsPvb56tdH0V4RW0kZc.png',
u'Мужской': logobase + '6YbhuWNqPKQWWsUGbBnSbAbm7IGssX.png',
u'Муз ТВ': logobase + 'gttVvZmkAklbl2i0Mqy1MCzSCn7WiY.png',
u'<NAME>': logobase + 'fD2Hnsq5BPMGvobLDMPZP049yNhBYt.png',
u'Мульт': logobase + 'ZVzHvGF8mZ6RTsSh6aWsPbF1FBLjyp.png',
u'Мультимания': logobase + '132.png',
u'НЛО ТВ': logobase + '2VGhYruaQo19G1NLGoOiTrwmPxef7d.png',
u'НСТ': logobase + 'fKYzdlWRz68qd9mRZnWuxMY73EyaSz.png',
u'НТВ (+4)': logobase + 'B5GA1cfgmn8EsxrdwfNUIrEbdqarXf.png',
u'НТВ (+7)': logobase + 'B5GA1cfgmn8EsxrdwfNUIrEbdqarXf.png',
u'НТВ HD': logobase + 'zdJ3ye6d3UWl5a56zm6LjqYH6ziSOs.png',
u'НТВ': logobase + 'B5GA1cfgmn8EsxrdwfNUIrEbdqarXf.png',
u'НТВ+ 3D': logobase + 'qvuG0JySlHPlEH9A7G4xMNjBqOB35h.png',
u'НТВ+ Баскетбол': logobase + 'bIWuyv7DJ65D5hIANkeo9SyIHGUXtn.png',
u'НТВ+ Кино Союз': logobase + 'F3RiiQtowA2YoHw73iEzcMLZwfDiSx.png',
u'НТВ+ Кино плюс': logobase + 'cnz8ZMypP2HV6phwv3rkSVQ7CgJExi.png',
u'НТВ+ Киноклуб': logobase + 'nRnksgRuhojvbFDqh5KZ30XJQ4iyFO.png',
u'НТВ+ Наше кино': logobase + 'UXJcZjVdZVIzciVHgGT3e6XxdRaBsD.png',
u'НТВ+ Премьера': logobase + 'lDiI54Y3LjIAOg5VV0adicP3OJrdgo.png',
u'НТВ+ Спорт плюс': logobase + '222.png',
u'НТВ+ Спорт': logobase + '2WCUNhvAYk7RJUCcbt4N8xvOxWGlbx.png',
u'НТВ+ Теннис': logobase + 'SdtlGA6I7WvjOpHsbabE4C9DP7JvJ7.png',
u'НТВ+ Футбол 1 HD': logobase + '5gVddUBrGBIvdTx0cpRgCMJwVgphJz.png',
u'НТВ+ Футбол 1': logobase + 'EQQJV8zgnv5MCfa5VBcvOm1GsLWovM.png',
u'НТВ+ Футбол 2 HD': logobase + '8X1dxETwOup3Qton0J35BoW5glu5UG.png',
u'НТВ+ Футбол 2': logobase + 'hD6OLNWbxyDtqE5VlVxCaoNeEoYpFb.png',
u'НТВ+ Футбол 3 HD': logobase + 'eC4IeAxFTXXMsVfQaQHPtAN7LvosGd.png',
u'НТВ+ Футбол 3': logobase + '4B2emgwWQ7kgFJwdoh0zNxDguh4Fh3.png',
u'НТН (Украина)': logobase + 'LpQE1Odb1EoH5dJ90gWjItVyEYBXsw.png',
u'НТРК "ИРТА"': logobase + 'd8zPTPLcK87xhBnGVFhgkuwFBY2TnK.png',
u'Нано ТВ': logobase + 'QuURIfJUmXegxsHMYqMivVwxizbfKd.png',
u'Наука 2.0': logobase + 'ypWbqYqKApM8cnDK1FibvQgpmgEay9.png',
u'Наше любимое кино': logobase + 'LSR5M6VxB0YDwv6803zrGFkq7vGQ3J.png',
u'Ника ТВ': logobase + 'pb3d3rBN4qW7ggzsosbAZXflfIv0Ty.png',
u'Новороссия ТВ': logobase + 'zUchDq13UVJRmlwAl3feV8cgKHYSyE.png',
u'Новый канал': logobase + 'k7YdHhVpFZPIkBMXS2P2O2TkZSPf0y.png',
u'Ностальгия': logobase + 'tIfiXoDaXoZevuGu9pZJSvX8unv1xl.png',
u'Ночной клуб': logobase + 'nXifSdkxHJVKI4SKtgtBQmCSHXtgOt.png',
u'ОТР': logobase + 'CqxKorK72v3ULbWkB3ZOhdte0duYZa.png',
u'Оплот 2': logobase + 'EqwpuUgrI6Wl6JVDK2fLtXkNaqOXeU.png',
u'Оплот ТВ': logobase + 'gvofGxTug45qSt1vsX0BPzQxGTrwTr.png',
u'Оружие': logobase + 'CyDUCmYXK8WS2kXCX5kiAOFejnlwoP.png',
u'Остросюжетное HD': logobase + 'mxF7CZsqsDRMMK4pN8ekdccEgvEsZC.png',
u'Охотник и рыболов': logobase + 'Ws2ddPI0b5Ie7PymoPUsboVlz9lYMS.png',
u'Парк развлечений': logobase + 'beyfqyeacrFG0PrOeKUQhzQ4bV6Q5d.png',
u'Первый автомобильный (Украина)': logobase + 'oZTXrmNOxeJIVSbnuxqbiuAL3voXYa.png',
u'Первый городской (Киров)': logobase + 'sxUNuJVQpUjRMmASa5TvwlGykSBAkY.png',
u'Первый городской Одесса': logobase + 'vBOI3YTA4FDLD0c7BHHjq476p9GMCZ.png',
u'Первый деловой': logobase + 'a1Qf3MpxC9FPD68Tj8vtUTNK8P25xr.png',
u'Первый канал (+4)': logobase + 'nHJycH0CkOhPeZ9DmB47iSMWP5HyWz.png',
u'Первый канал (+6)': logobase + 'xEhi4YWxLlIcHq33Y44NrYvyHRArwa.png',
u'Первый канал (Европа)': logobase + 'WimZD6efLd6QotrPP9uiJeF7t50nFv.png',
u'Первый канал (СНГ)': logobase + 'WimZD6efLd6QotrPP9uiJeF7t50nFv.png',
u'Первый канал HD': logobase + 'VxAFWzh1y88c8Aqa17TsxD2IO5pqoi.png',
u'Первый канал': logobase + 'WimZD6efLd6QotrPP9uiJeF7t50nFv.png',
u'Первый музыкальный HD': logobase + 'YxKl6Jqi6fmlUJjYGPnBhWhntKzI65.png',
u'Первый музыкальный UHD': logobase + 'YxKl6Jqi6fmlUJjYGPnBhWhntKzI65.png',
u'Первый музыкальный Россия HD': logobase + 'h7EDhdGypKmtfEP98O052SLlXUCcXt.png',
u'Первый музыкальный Россия': logobase + 'MkX2WG1zhZ2KcYFdL0xWH1T4xkO7UW.png',
u'Первый музыкальный канал': logobase + 'gYpYhzD3akuKSFpRmkh2p36pXnqHoW.png',
u'Первый национальный (Украина)': logobase + '8yGRnEG4pNYMLFDVekA2yeOAX1lGZ2.png',
u'Первый образовательный': logobase + '1kXxtStMuodaPU09H3rla3ry3QA2Wr.png',
u'Перец (+4)': logobase + '28.png',
u'Перец': logobase + '28.png',
u'Пиксель ТВ': logobase + 'BdCXB7wPZMNvlWzB5xEFzmsYUXcfXW.png',
u'ПлюсПлюс': logobase + '6gVIy7RMokFO61iVawgwbthe5mhgqm.png',
u'Право ТВ': logobase + 'jqV4vr8830fm6lYlX9F7w3tRvcrRra.png',
u'Просвещение': logobase + 'Fpx3Vqqk2VNcXl4YjsfO53XscWadvF.png',
u'Психология 21': logobase + 'AyLAdiqcKu5X8ykdLf2bO9HsxMlJdO.png',
u'Пятница (+2)': logobase + '0fafj6PSIWdqtBdgwYTl9M06SDU2wA.png',
u'Пятница (+4)': logobase + 'fF9FYWNiHFfuR1ZrkaboYHwi1O37TJ.png',
u'Пятница (+7)': logobase + '0fafj6PSIWdqtBdgwYTl9M06SDU2wA.png',
u'Пятница': logobase + '0fafj6PSIWdqtBdgwYTl9M06SDU2wA.png',
u'Пятый канал (+4)': logobase + 'tUE3C0hSxn7AxGHhST36CWi6HgJbIi.png',
u'Пятый канал': logobase + 'nIUDYY41OO4Xo0ntGpGv2rfpOR5ngt.png',
u'РБК': logobase + 'JUMDXZxxB3UiVpMpU8t0aCpbVzxTmP.png',
u'РЕН ТВ (+2)': logobase + 'xwSFxBlid4YhPjZl8ibcIeTlzP0VVS.png',
u'РЕН ТВ (+4) (резерв)': logobase + '2Z3hcLqC0pQC9gLkuZSl5WkHfS5HYb.png',
u'РЕН ТВ (+4)': logobase + 'BE7n1y2cjisjflpQuMdC9P3c79rWb7.png',
u'РЕН ТВ': logobase + 'LJvkfB2kYaDzij1Y13Fy6syUCkP5Y6.png',
u'<NAME>': logobase + 'DixgG6tVZzcVHO2LPQEx3QrtfoVah3.png',
u'<NAME>': logobase + 'hBFJBYNiqZUom0ooVtNEJKliZwfioO.png',
u'Радость моя': logobase + 'VRylZFYgFq7AL0FWcbf5JVOX3desn3.png',
u'Ретро ТВ': logobase + 'axrNIB7372SHIRwqT0jBbfyvjSoZ7I.png',
u'Россия 1 (+4)': logobase + 'DL17FIS3R8m6eWTwFvdDYualmxvkGV.png',
u'Россия 1': logobase + 'UUrfoqi6NQcc9gRLnCc8ODZJ2T3ShE.png',
u'Россия 24': logobase + 'LWfGV6eICPYL7psaBfw2dOgGrOtHFS.png',
u'Россия HD': logobase + 'ghvqmVpPWqn9x6POAm9UJBvXFzTrqN.png',
u'Россия К (+4)': logobase + 'lzLdqpUZ8iHL9JEV7vQGG1gSlyswfB.png',
u'Россия К': logobase + 'W9pWrec1BOJTmj8okrFeyM44wcpyd4.png',
u'Россия РТР (Украина)': logobase + '5o9OWeEw90hM5ouECuTLwj5QP8MwU3.png',
u'Русская ночь': logobase + '9Sh9bJuj6js5AJsypAd6UvwnsIB25R.png',
u'Русский бестселлер': logobase + 'b5JXaosgmcanh9EVJg52yBefvdLQF7.png',
u'Русский детектив': logobase + '7I7VjbsFMIkZdoSbHFXiKEVZKNUbOM.png',
u'Русский иллюзион': logobase + 'E9Imfr8aHN5midPVpNhJ3fo49FHbQE.png',
u'Русский роман': logobase + '2smriIFxtj7Ojh4jyZq0K1XrT98XjS.png',
u'Русский экстрим': logobase + 'upndVpIdjY3vb5vrituof5UcKISNcQ.png',
u'Рыжий': logobase + 'wfBSy60qHaPSKPpTfrNv9Q167iHIPu.png',
u'СТБ': logobase + 'saZlIDrdaXWoiQa8sfZp2bEAeH0kXk.png',
u'СТРК HD': logobase + 'xOmVS1kQFIHeAwqtJbBfrbE75Quj2a.png',
u'СТС (+4)': logobase + | |
# standard library
import socketserver
import logging
import threading
import time
import socket
import struct
import binascii
from uuid import getnode as get_mac
# third party
# from this package
# #######################################
# THE HANDLERS FOR THE ACTUAL CONNECTIONS
# #######################################
class PhantomDataTransferHandler(socketserver.BaseRequestHandler):
"""
A handler object will be instantiated to handle a new connection to the PhantomDataTransferServer, which is being
used to transmit image data from the phantom camera to the control unit.
    This handler will only handle a single connection, which means receiving all the bytes of the image and then
returning the complete byte string back to the server, before the handler closes.
CHANGELOG
Added 23.02.2019
"""
def handle(self):
"""
Main method for handling the data transfer connection.
Will handle a single data transmission and then end itself
CHANGELOG
Added 23.02.2019
Changed 26.02.2019
        Now the program does not require the complete number of pixels to be received, but also tolerates the
        last ~100 bytes missing from the final TCP package. The missing bytes are simply padded with zeros. This
        was necessary as the camera seems to drop a few bytes from time to time.
Changed 18.03.2019
Fixed a bug, where the program would call strip on the data and randomly interpret some of the pixels as
whitespace characters, which would cause some pixels to go missing.
:return:
"""
self.server.logger.debug(
'New DATA STREAM connection from IP %s and PORT %s',
self.client_address[0],
self.client_address[1]
)
        # To this buffer we will append all the incoming byte data and then, when all the data is received,
        # return the contents of the buffer to the server, so that the PhantomSocket client can access it there
buffer = []
buffer_length = 0
        while self.server.running:
            data = self.request.recv(524288)
            if not data:
                # The camera closed the connection; any bytes that are still missing will be padded with
                # zeros below (the camera seems to drop a few bytes from the last TCP package from time to time)
                break
            if buffer_length < self.server.size:
                buffer.append(data)
                buffer_length += len(data)
                self.server.logger.debug('Received: %s/%s', buffer_length, self.server.size)
            if buffer_length >= self.server.size:
                break
        # Once the image has been received, the byte string is being passed to the server object by setting
        # its 'image_bytes' attribute. Then the handler thread ends itself
        append_bytes = b'\x00' * max(self.server.size - buffer_length, 0)
        buffer_bytes = b''.join(buffer) + append_bytes
        self.server.image_bytes = buffer_bytes[:self.server.size]
        self.server.logger.debug('Received %s bytes; had to append %s', buffer_length, len(append_bytes))
        self.server.logger.debug('Finished receiving image with %s bytes', len(self.server.image_bytes))
self.request.close()
self.server.logger.debug('Data Handler shutting down...')
class PhantomXDataTransferHandler(threading.Thread):
"""
This thread will be started by the 10G Data transfer server, if an image has to be received
"""
# CONSTANT DEFINITIONS
# --------------------
# The protocol identifier, which a phantom camera uses in the ethernet frame header to identify the data packages
PHANTOM_ETHERNET_PROTOCOL = b'\x88\xb7'
# The header size is required to compute the payload size, which in turn is required to extract the payload data
# from the whole ethernet frame.
# 10.05.2019
# Turns out the header size is actually 32 and not 14
HEADER_SIZE = 32
def __init__(self, server):
"""
The constructor
CHANGELOG
Added 19.03.2019
:param server:
"""
threading.Thread.__init__(self)
self.server = server
self.socket = None
# MAIN THREAD METHOD
# ------------------
# This is the method, that is being executed as the thread
def run(self):
"""
        A new socket will be created to listen for raw ethernet frames. All ethernet frames are received and
        decoded to check their protocol identifier; if it matches the phantom camera, the payload is appended
        to the buffer for the image data.
CHANGELOG
Added 19.03.2019
:return: void
"""
self.server.logger.debug('New RAW frame handler for INTERFACE %s', self.server.ip)
# Creating the socket to accept the raw ethernet frame
# For the case of a RAW socket connection "server.ip" has to be the string identifier of a network interface.
# "socket.htons(3)" specifies to listen for all protocols (The irrelevant packages are filtered after receiving)
self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.htons(3))
self.socket.bind((self.server.ip, 0))
        # To this buffer we will append all the incoming byte data and then, when all the data is received,
        # return the contents of the buffer to the server, so that the PhantomSocket client can access it there.
        # The handler object really only interacts with the server, so that the client only has to interact
        # with the server.
buffer = []
buffer_length = 0
self.server.logger.debug("Running status: %s", self.server.running)
while self.server.running:
data = self.socket.recv(10000)
# This will decode the raw bytes string, which has been received into the various parts of the header and
# the payload and save those as values in a dictionary for easy access
data_dict = self.unpack_data(data)
# A package is only processed as part of the image, if the protocol identifier matches the one used by the
# phantom camera.
# If that is the case, the payload data is being appended to the buffer.
if data_dict['protocol'] == self.PHANTOM_ETHERNET_PROTOCOL:
payload = data_dict['payload']
buffer.append(payload)
buffer_length += len(payload)
self.server.logger.debug('Received %s/%s bytes total', buffer_length, self.server.size)
# when all data has been received, the buffer is being concatenated to one long bytes string and returned
# to the server.
if buffer_length >= self.server.size:
self.server.image_bytes = b''.join(buffer)[0:self.server.size]
self.server.logger.debug('Received image with %s/%s bytes', buffer_length, self.server.size)
break
self.socket.close()
self.server.logger.debug('Data Handler shutting down...')
# HELPER METHODS
# --------------
@classmethod
def unpack_data(cls, data):
"""
        Given the data received as a bytes string, this method will unpack the information within the header
        and the payload into a dictionary with the keys "source" for the source MAC address, "destination" for
        the destination MAC, "protocol" for the used protocol identifier and "payload" for the data sent within
        the ethernet frame
CHANGELOG
Added 19.03.2019
Changed 10.05.2019
Fixed the unpacking by changing the header size from 14 to 32 and modifying the struct unpack accordingly
:param data:
:return: data
"""
        payload_length = len(data) - cls.HEADER_SIZE
        format_string = '!6s6s2s18s{}s'.format(payload_length)
        # An ethernet frame starts with the destination MAC, followed by the source MAC and the protocol id
        destination_address, source_address, protocol, _, payload = struct.unpack(format_string, data)
data_dict = {
'source': source_address,
'destination': destination_address,
'protocol': protocol,
'payload': payload
}
return data_dict
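    # Illustrative sketch, not part of the original module: demonstrates the 32 byte header layout that
    # unpack_data() assumes -- destination MAC (6 bytes), source MAC (6 bytes), protocol identifier (2 bytes)
    # and 18 further header bytes before the payload. The MAC addresses and payload used here are made-up
    # values purely for demonstration.
    @classmethod
    def example_unpack_frame(cls):
        destination = b'\xaa\xbb\xcc\xdd\xee\xff'   # hypothetical destination MAC
        source = b'\x11\x22\x33\x44\x55\x66'        # hypothetical source MAC
        frame = destination + source + cls.PHANTOM_ETHERNET_PROTOCOL + b'\x00' * 18 + b'pixel data'
        # returns {'source': source, 'destination': destination, 'protocol': b'\x88\xb7', 'payload': b'pixel data'}
        return cls.unpack_data(frame)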
# ################################
# THE DATA TRANSFER SERVER OBJECTS
# ################################
class DataTransferServer:
"""
This is the base class for all possible variations of data transfer servers. The most important ones being the
one for the "normal" network data transmission and the one for the 10G network transmission.
This class defines all the common functionality which all data transfer servers have to share (acting as sort of an
    interface as well).
IMPORTANT: A child class inheriting from this class has to initialize the according socketserver.Server class first,
as this base class makes assumptions about that behaviour.
CHANGELOG
Added 19.03.2019
"""
def __init__(self, ip, port, format, handler_class):
"""
The constructor.
CHANGELOG
Added 19.03.2019
:param ip:
:param port:
:param format:
:param handler_class:
"""
# Creating a new logger, whose name is a combination from the module name and the class name of this very class
self.log_name = '{}.{}'.format(__name__, self.__class__.__name__)
self.logger = logging.getLogger(self.log_name)
# Saving the ip and the port. The tuple of both ip and port is needed for most of the networking functionality
# of python.
self.ip = ip
self.port = port
self.address = (self.ip, self.port)
self.format = format
self.handler_class = handler_class
self.size = 0
self.image_bytes = None
self.running = None
self.thread = None
def set_data_size(self, size):
self.size = size
class PhantomDataTransferServer(socketserver.ThreadingTCPServer, DataTransferServer):
"""
    This is a threaded server that is started by the main phantom control instance, the PhantomSocket.
It listens for incoming connections FROM the phantom camera, because over these secondary channels the camera
transmits the raw byte data.
The way it works:
The main program execution maintains a reference to this object. This object will work as the main access point
for receiving images using the "receive_image" method. But the actual process of receiving the image is not handled
in this object.
    Although this object listens for incoming data connections, as soon as a camera makes a request for a new
connection for transferring an image, this object automatically creates a handler object and passes the data
connection to that handler, which is then in | |
<filename>main.py<gh_stars>0
#!/usr/bin/env python
# If you keep OpenSCAD in an unusual location, uncomment the following line of code and
# set it to the full path to the openscad executable.
# Note: Windows/python now support forward-slash characters in paths, so please use
# those instead of backslashes which create a lot of confusion in code strings.
# OPENSCAD_PATH = "C:/Program Files/OpenSCAD/openscad"
# do not edit below unless you know what you are doing!
import os
import configparser
import platform
from shutil import copy, rmtree
import shlex
import random as rd
import time
import numpy as np
import math
import re
from PIL import Image
import subprocess as sp
halt = -1 # debug: terminate skipping this shell (0 to n to enable)
# Make sure we have a fresh random seed
rd.seed()
USE_SCAD_THREAD_TRAVERSAL = False
STL_DIR = "stl_files"
PREV_DIR = "prev"
def openscad():
try:
if OPENSCAD_PATH:
return OPENSCAD_PATH
except NameError:
pass
if os.getenv("OPENSCAD_PATH"):
return os.getenv("OPENSCAD_PATH")
if platform.system() == "Darwin":
return "/Applications/OpenSCAD.app/Contents/MacOS/OpenSCAD"
if platform.system() == "Windows":
# Note: Windows allows forward slashes now
return '"C:/Program Files/OpenSCAD/openscad"'
# Default to linux-friendly CLI program name
return "openscad"
def prepwd():
# Linux and other systems that use PATH variables don't need an absolute path configured.
# if os.path.exists(openscad_exe) == False:
# input("ERROR: openscad path not found.")
# exit()
if os.path.exists(STL_DIR):
rmtree(STL_DIR)
os.mkdir(STL_DIR) # Default perms: world-writable
if os.path.exists(PREV_DIR):
rmtree(PREV_DIR)
os.mkdir(PREV_DIR) # Default perms: world-writable
def has_scad_threading():
cmd = [openscad(), "--help"]
# Note: help comes on stderr
out = sp.check_output(cmd, stderr=sp.STDOUT, universal_newlines=True)
m = re.search(r"enable experimental features:\s(.+?)\n\s*\n", out, flags=re.DOTALL)
if m:
return "thread-traversal" in re.split(r"\s*\|\s*", m[1])
return False
def scad_version():
cmd = [openscad(), "--version"]
# Note: version comes on stderr
out = sp.check_output(cmd, stderr=sp.STDOUT, universal_newlines=True)
m = re.match(r"^\s*OpenSCAD version (\d{4})\.(\d\d)\.(\d\d)\s*$", out)
return (int(m[1]), int(m[2]), int(m[3])) if m else ()
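# Quick illustration (assumption about the exact CLI output, which the regex above encodes): an output line
# such as "OpenSCAD version 2021.01.03" would make scad_version() return (2021, 1, 3), while any unexpected
# format yields the empty tuple ().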
def execscad(threadid=0):
print("Executing OpenSCAD script...")
cmd = [openscad()]
if USE_SCAD_THREAD_TRAVERSAL:
cmd.append("--enable=thread-traversal")
cmd.extend(
[
"-o",
os.path.join(os.getcwd(), STL_DIR, str(shell + 1) + ".stl"),
os.path.join(os.getcwd(), "make_shells.scad"),
]
)
print(cmd)
sp.run(cmd)
def udnbers(n, vi, nc, mw, mh, stag):
for y in range(0, mh):
for x in range(0, mw):
x3 = int((x + stag[y]) % mw)
x2 = [x - 1, x + 1, x, x]
y2 = [y, y, y - 1, y + 1]
for i in range(0, 4):
if stag[y] % mw > 0:
x2[i] = int((x2[i] + mw) % mw)
else:
if x2[i] < 0:
x2[i] = 0
if x2[i] > mw - 1:
x2[i] = mw - 1
if (
not ((x3 == 0 and i == 0) or (x3 == mh - 1 and i == 1))
and y2[i] > -1
and y2[i] < mh
):
n[x, y, i] = vi[int(x2[i]), int(y2[i])] == 0
else:
n[x, y, i] = 0
nc[x, y] = len(np.argwhere(n[x, y].astype("int")))
def genmaze(mw, mh, stag, st, ex):
im = Image.new("L", [2 * mw + 1, 2 * mh + 1], 0)
visited = np.zeros(mw * mh)
nbercount = np.zeros(mw * mh)
nbers = np.ones(mw * mh * 4)
walls = np.ones(mw * mh * 4)
r = int((mw * mh) / 2)
vcount = 1
visited[r] = 1
visited = visited.reshape([mw, mh])
nbers = nbers.reshape([mw, mh, 4])
nbercount = nbercount.reshape([mw, mh])
walls = walls.reshape([mw, mh, 4])
udnbers(nbers, visited, nbercount, mw, mh, stag)
while vcount < (mw * mh):
v = np.transpose(np.nonzero(np.logical_and(visited == 1, nbercount > 0)))
# choose branch
r = rd.randint(0, len(v) - 1)
c = v[r]
# choose wall to break
if nbers[c[0], c[1]][0] == 1 or nbers[c[0], c[1]][1] == 1:
# horizontal bias when possible
r = rd.randint(0, nbercount[c[0], c[1]] - 1 + hbias)
if r > nbercount[c[0], c[1]] - 1:
r = int(r - (nbercount[c[0], c[1]]))
if nbers[c[0], c[1]][0] == 1 and nbers[c[0], c[1]][1] == 1:
r = int(r % 2)
else:
r = 0
else:
# otherwise just vertical
r = rd.randint(0, nbercount[c[0], c[1]] - 1)
n = np.argwhere(nbers[c[0], c[1]])[r]
# break wall
walls[c[0], c[1], n] = 0
c2 = c
# walls: 0=L 1=R 2=U 3=D
if n == 0:
n2 = 1
c2[0] = c[0] - 1
elif n == 1:
n2 = 0
c2[0] = c[0] + 1
elif n == 2:
n2 = 3
c2[1] = c[1] - 1
else:
n2 = 2
c2[1] = c[1] + 1
c2[0] = int((c2[0] + mw) % mw)
visited[c2[0], c2[1]] = 1
walls[c2[0], c2[1], n2] = 0
udnbers(nbers, visited, nbercount, mw, mh, stag)
vcount = vcount + 1
# preview
if ((i == 0 and shell < shells - 1) or (i == 1 and shell > 0)) and tpp != 1:
im.putpixel((1 + ex * 2, 0), 255)
im.putpixel((1 + st * 2, mh * 2), 255)
for y in range(0, mh):
for x in range(0, mw):
imx = 1 + x * 2
imy = 1 + y * 2
imnx = [imx - 1, imx + 1, imx, imx]
imny = [imy, imy, imy - 1, imy + 1]
if visited[x, y] == 1:
im.putpixel((imx, imy), 255)
for idx in range(0, 4):
if walls[x, y, idx] == 0:
im.putpixel((imnx[idx], imny[idx]), 255)
if tpp == 2:
im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + "a.png"))
else:
im.save(os.path.join(os.getcwd(), PREV_DIR, str(shell + 1) + ".png"))
return walls
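# Note on the return value (derived from the wall handling above): `walls` is an (mw, mh, 4) array whose last
# axis follows the 0=L 1=R 2=U 3=D convention, where a 0 means that wall has been broken open. gen() below only
# inspects indices 1 (right) and 3 (down) when flattening the maze into the 0/1/2/3 matrix written to maze.scad.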
def gen():
global shell
global d2
global mh
global mw
global i
global tpp
if shell < shells:
if shell == halt:
exit()
if shell + 1 > 0 and shell + 1 < shells and shell + 1 == tp and tpp < 1:
tpp = -1
if tpp < 1:
print("part: " + str(shell + 1))
wt = mwt
if tpp < 1:
if shell == 0:
d = (mw * us * p) / np.pi + wt - marge * 2
else:
if shell == tp:
d = d2
else:
d = d2 + us + wt + marge * 2
if i == 0:
mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
if shell == (shells - 2):
mh += 1
else:
if shell == (shells - 1):
mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
else:
mw = int(math.ceil((d2 / p + us) * np.pi / 2 / us))
mh += 1
else:
d = d2 + us + wt + marge * 2
mw = int(math.ceil((d / p + us) * np.pi / 2 / us))
mh += 1
# stag/shift
stag = np.zeros(mh)
if stagmode in (1, 2):
for y in range(0, mh):
if y == 0 or stagmode == 1:
stag[y] = rd.randint(0, mh - 1)
else:
stag[y] = stag[y - 1] + rd.randint(0, mh - 1)
elif stagmode == 3:
stag = np.multiply(np.arange(0, mh), stagconst).astype("int")
# maze
st = rd.randint(0, mw - 1)
ex = rd.randint(0, mw - 1)
marr = genmaze(int(mw), int(mh), stag, st, ex)
matrix = []
for y in range(0, mh):
row = []
for x in range(0, mw * p):
x2 = x % mw
r = marr[x2, y, 1] == 0
u = marr[x2, y, 3] == 0
if u and r:
row.append("3")
elif u:
row.append("2")
elif r:
row.append("1")
else:
row.append("0")
matrix.append(f"[{','.join(row)}]")
s = f"[{','.join(matrix)}];"
if tpp < 1:
maze_num = 1
open_mode = "w"
else:
maze_num = 2
open_mode = "a+"
with open("maze.scad", open_mode) as maze:
maze.write(f"maze{maze_num}=")
maze.write(
"\n".join(
[
s,
f"h{maze_num}={mh};",
f"w{maze_num}={mw * p};",
f"st{maze_num}={st};",
f"ex{maze_num}={ex};",
]
)
)
base = 1
lid = 0
if shell == shells - 1:
lid = 1
base = 0
if shell > shells - 2:
mos = 0
else:
mos = shells - shell - 2
with open("config.scad", "w+") as cfg:
cfg.write(
"\n".join(
[
f"p={p};",
f"tpp={tpp};",
f"is={shell};",
f"os={mos};",
f"lid={lid};",
f"base={base};",
f"iw={wt};",
f"id={d};",
f"s={us};",
f"i={i};",
f"bd={d + wt * 2 + us * 2};",
f"m={marge};",
]
)
)
if shell < shells - 2:
d2 = d
if shell > 0 and shell < shells and shell == tp and tpp < 1:
if i == 0: # double nub transition
tpp | |
"fit_prior": Categorical([True, False]),
"alpha": Real(0, 1, prior='uniform')
}
},
{
"id": 11,
"type": "classification",
"model": GaussianNB(),
"search_space":
{
"var_smoothing": Real(0, 1, prior='uniform')
}
},
{
"id": 12,
"type": "classification",
"model": BernoulliNB(),
"search_space":
{
"fit_prior": Categorical([True, False]),
"alpha": Real(0, 1, prior='uniform')
}
},
{
        # ALL DOCUMENTATION FEATURES
"id": 0,
"type": "regression",
"model": RandomForestRegressor(),
"principal": "RandomForest",
"family": "ensemble",
"search_space":
[
{
"n_estimators": Integer(100, 500),
"criterion": Categorical(['mse']), # 'squared_error', 'absolute_error', 'poisson'
"max_depth": Integer(6, 20), # values of max_depth are integers from 6 to 20
"min_samples_split": Integer(2, 10),
"min_samples_leaf": Integer(1, 10),
"min_weight_fraction_leaf": Real(0, 0.5, prior='uniform'),
"max_features": Categorical(['auto', 'sqrt','log2']),
#"max_leaf_nodes": Integer(0, 10),
#"min_impurity_decrease": Real(0, 1, prior='uniform'),
"bootstrap": Categorical([False]), # values for boostrap can be either True or False
"oob_score": Categorical([False]),
"n_jobs": [njobs],
"random_state": [rands],
#"verbose": Integer(0, 2),
#"warm_start": Categorical([False, True]),
"ccp_alpha": Real(0, 1, prior='uniform'),
#"max_samples": Integer(0, len(X)),
},
{
"n_estimators": Integer(100, 500),
"criterion": Categorical(['mse']), # 'squared_error', 'absolute_error', 'poisson'
"max_depth": Integer(6, 20),
"min_samples_split": Integer(2, 10),
"min_samples_leaf": Integer(2, 10),
"min_weight_fraction_leaf": Real(0, 0.5, prior='uniform'),
"max_features": Categorical(['auto', 'sqrt','log2']),
#"max_leaf_nodes": Integer(0, 10),
#"min_impurity_decrease": Real(0, 1, prior='uniform'),
"bootstrap": Categorical([True]),
"oob_score": Categorical([False, True]),
"n_jobs": [njobs],
"random_state": [rands],
#"verbose": Integer(0, 2),
#"warm_start": Categorical([False, True])
"ccp_alpha": Real(0, 1, prior='uniform'),
#"max_samples": Integer(0, len(X)),
}
]
},
{
        # ALL DOCUMENTATION FEATURES
"id": 1,
"type": "regression",
"model": DecisionTreeRegressor(),
"principal": "DecisionTree",
"family": "tree",
"search_space":
{
"criterion": Categorical(['mse', 'friedman_mse', 'mae']), # 'squared_error', 'absolute_error', 'poisson'
#"splitter": Categorical(['best', 'random']),
#"max_depth": Integer(6, 20), # values of max_depth are integers from 6 to 20
"min_samples_split": Integer(2, 10),
"min_samples_leaf": Integer(2, 10),
"min_weight_fraction_leaf": Real(0, 0.5, prior='uniform'),
"max_features": Categorical(['auto', 'sqrt','log2']),
"random_state": [rands],
#"max_leaf_nodes":Integer(0, 10),
#"min_impurity_decrease": Real(0, 1, prior='uniform'),
#"alpha": Real(0, 1, prior='uniform')
}
},
{
"id": 2,
"type": "regression",
"model": KNeighborsRegressor(),
"search_space":
{
"n_neighbors": Integer(1, 10),
"weights": Categorical(['uniform', 'distance']),
"algorithm": Categorical(['auto', 'ball_tree', 'kd_tree', 'brute']),
"leaf_size": Integer(1, 50),
"p": Integer(1, 2),
"metric": Categorical(['minkowski']),
#metric_params": Categorical(['']),
"n_jobs": [njobs],
}
},
{
"id": 3,
"type": "regression",
"model": SVR(),
"search_space":
[
{
"C": Real(0.1, 10, prior='uniform'),
"kernel": Categorical(['linear']), #precomputed #Precomputed matrix must be a square matrix
"gamma": Categorical(['scale']), #auto #will chose one of the existing
#"shrinking": Categorical([True, False]),
#"probability": Categorical([True, False]),
"tol": [0.01],
#"epsilon": Real(0, 0.5, prior='uniform'),
#"shrinking": Categorical(['ovo', 'ovr']),
#"cache_size": Integer(1, 500),
#"verbose": Categorical([True, False]),
#"max_iter": Integer(-1, 1000),
},
#{
# "C": Real(0.1, 10, prior='uniform'),
# "kernel": Categorical(['rbf']), #precomputed #Precomputed matrix must be a square matrix
# "gamma": Categorical(['scale']), #auto #will chose one of the existing
# #"shrinking": Categorical([True, False]),
# #"probability": Categorical([True, False]),
# "tol": [0.01],
# #"epsilon": Real(0, 0.5, prior='uniform'),
# #"shrinking": Categorical(['ovo', 'ovr']),
# #"cache_size": Integer(1, 500),
# #"verbose": Categorical([True, False]),
# #"max_iter": Integer(-1, 1000),
#},
#{
# "C": Real(0.1, 10, prior='uniform'),
# "kernel": Categorical(['sigmoid']), #precomputed #Precomputed matrix must be a square matrix
# "gamma": Categorical(['scale']), #auto #will chose one of the existing
# "coef0": Real(0, 1, prior='uniform'),
# #"shrinking": Categorical([True, False]),
# #"probability": Categorical([True, False]),
# "tol": [0.01],
# #"epsilon": Real(0, 0.5, prior='uniform'),
# #"shrinking": Categorical(['ovo', 'ovr']),
# #"cache_size": Integer(1, 500),
# #"verbose": Categorical([True, False]),
# #"max_iter": Integer(-1, 1000),
#},
{
"C": Real(0.1, 10, prior='uniform'),
"kernel": Categorical(['poly']), #precomputed #Precomputed matrix must be a square matrix
"degree": [1],
"gamma": Categorical(['scale']), #auto #will chose one of the existing
"coef0": Real(0, 1, prior='uniform'),
#"shrinking": Categorical([True, False]),
#"probability": Categorical([True, False]),
"tol": [0.01],
#"epsilon": Real(0, 0.5, prior='uniform'),
#"shrinking": Categorical(['ovo', 'ovr']),
#"cache_size": Integer(1, 500),
#"verbose": Categorical([True, False]),
#"max_iter": Integer(-1, 1000),
}
]
},
{
"id": 4,
"type": "regression",
"model": SGDRegressor(),
"principal": "StohasticGradient",
"family": "linear",
"search_space":
[
{
"loss": Categorical(['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']),
"penalty": Categorical(['l2', 'l1']),
"alpha": Real(0, 0.5, prior='uniform'),
"fit_intercept": Categorical([True, False]),
#"max_iter": Integer(500, 1000),
#"tol": Real(0, 0.5, prior='uniform'),
#"shuffle": Categorical([True, False]),
#"verbose": Categorical([True, False]),
#"epsilon": Real(0, 0.5, prior='uniform'),
"random_state": [rands],
"learning_rate": Categorical(['constant', 'optimal', 'invscaling', 'adaptive']),
"eta0": Real(0, 0.5, prior='uniform'), #eta0 must be > 0
#"power_t": Real(0, 0.5, prior='uniform'),
#"early_stopping": Categorical([True, False]),
#"validation_fraction": Real(0, 0.5, prior='uniform'),
#"n_iter_no_change": Integer(1, 10),
#"warm_start": Categorical([True, False]),
#"average": Categorical([True, False]),
},
{
"loss": Categorical(['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive']),
"penalty": Categorical(['elasticnet']),
"alpha": Real(0, 0.5, prior='uniform'),
"l1_ratio": Real(0, 0.5, prior='uniform'),
"fit_intercept": Categorical([True, False]),
#"max_iter": Integer(500, 1000),
#"tol": Real(0, 0.5, prior='uniform'),
#"shuffle": Categorical([True, False]),
#"verbose": Categorical([True, False]),
#"epsilon": Real(0, 0.5, prior='uniform'),
"random_state": [rands],
"learning_rate": Categorical(['constant', 'optimal', 'invscaling', 'adaptive']),
"eta0": Real(0, 0.5, prior='uniform'), #eta0 must be > 0
#"power_t": Real(0, 0.5, prior='uniform'),
#"early_stopping": Categorical([True, False]),
#"validation_fraction": Real(0, 0.5, prior='uniform'),
#"n_iter_no_change": Integer(1, 10),
#"warm_start": Categorical([True, False]),
#"average": Categorical([True, False]),
}
]
},
{
"id": 5,
"type": "regression",
"model": Ridge(),
"principal": "StohasticGradient",
"family": "linear",
"search_space":
[
{
"alpha": Real(0, 0.5, prior='uniform'),
"fit_intercept": Categorical([True, False]),
"normalize": Categorical([True, False]),
"copy_X": Categorical([True, False]),
#"max_iter": Integer(500, 1000),
#"tol": Real(0, 0.5, prior='uniform'),
"solver": Categorical(['svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag', 'saga']),
"positive": Categorical([False]),
"random_state": [rands],
},
{
"alpha": Real(0, 0.5, prior='uniform'),
"fit_intercept": Categorical([True, False]),
"normalize": Categorical([True, False]),
"copy_X": Categorical([True, False]),
#"max_iter": Integer(500, 1000),
#"tol": Real(0, 0.5, prior='uniform'),
"solver": Categorical(['lbfgs']),
"positive": Categorical([True]),
"random_state": [rands],
}
]
},
{
"id": 6,
"type": "regression",
"model": LinearRegression(),
"search_space":
{
"fit_intercept": Categorical([True, False]),
"normalize": Categorical([True, False]),
#"copy_X": Categorical([True, False]),
"n_jobs": [njobs],
#"positive": Categorical([False, True]),
}
},
{
"id": 7,
"type": "regression",
"model": BayesianRidge(),
"search_space":
{
"n_iter": [300],
"tol": Real(0, 0.5, prior='uniform'),
"alpha_1": Real(0, 0.5, prior='uniform'),
"alpha_2": Real(0, 0.5, prior='uniform'),
"lambda_1": Real(0, 0.5, prior='uniform'),
"lambda_2": Real(0, 0.5, prior='uniform'),
"alpha_init": Real(0, 0.5, prior='uniform'),
"compute_score": Categorical([True, False]),
"fit_intercept": Categorical([True, False]),
"normalize": Categorical([True, False]),
#"copy_X": Categorical([True, False]),
#"verbose": Categorical([True, False]),
}
},
{
"id": 8,
"type": "regression",
"model": ARDRegression(),
"search_space":
{
"n_iter": [300],
"tol": Real(0, 0.5, prior='uniform'),
"alpha_1": Real(0, 0.5, prior='uniform'),
"alpha_2": Real(0, 0.5, prior='uniform'),
"lambda_1": Real(0, 0.5, prior='uniform'),
"lambda_2": Real(0, 0.5, prior='uniform'),
"compute_score": Categorical([True, False]),
"threshold_lambda":Real(10000, 20000, prior='uniform'),
"fit_intercept": Categorical([True, False]),
"normalize": Categorical([True, False]),
#"copy_X": Categorical([True, False]),
#"verbose": Categorical([True, False]),
}
}
]
return models
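# Illustrative sketch, not part of the original script: one way a single entry from the list above could be
# tuned with skopt's BayesSearchCV, which accepts a search space given as a dict or a list of dicts exactly
# like the "search_space" values defined here. The n_iter/cv values and the X, y arguments are placeholders
# chosen for the example, not taken from the original code.
def example_tune_model(model_entry, X, y):
    from skopt import BayesSearchCV
    opt = BayesSearchCV(
        estimator=model_entry["model"],
        search_spaces=model_entry["search_space"],  # dict or list of dicts, as defined above
        n_iter=25,
        cv=3,
        n_jobs=-1,
        random_state=0,
    )
    opt.fit(X, y)
    return opt.best_estimator_, opt.best_params_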
def sample_gausian(df, cls, n, r):
df = df.sample(n = n, random_state = r)
return df
def sample_stratified(df, cls, n, r):
n = min(n, df[cls].value_counts().min())
df_ = df.groupby(cls).apply(lambda x: x.sample(n))
df_.index = df_.index.droplevel(0)
return df_
def load_data(df: None, args):
# Load Dataset
mode_intr_meth = args['mode_intr_meth']
if mode_intr_meth == "trai":
data_path = args["data_trai_path"]
data_name = args["data_trai_name"]
data_extn = args["data_trai_extn"]
data_sepa = args["data_trai_sepa"]
data_deci = args["data_trai_deci"]
elif mode_intr_meth == "pred":
data_path = args["data_pred_path"]
data_name = args["data_pred_name"]
data_extn = args["data_pred_extn"]
data_sepa = args["data_pred_sepa"]
data_deci = args["data_pred_deci"]
data_mode = args["data_mode"]
if data_mode == "local":
df = pd.read_csv('{0}/{1}.{2}'.format(data_path, data_name, data_extn), sep=data_sepa, decimal=data_deci, low_memory=False)
elif data_mode == "drive":
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
df = pd.read_csv('{0}/{1}.{2}'.format(data_path, data_name, data_extn), sep=data_sepa, decimal=data_deci, low_memory=False)
elif data_mode == "dataframe":
df = df
data_smpl_mode = args["data_smpl_mode"]
data_smpl_pop = args["data_smpl_pops"]
if data_smpl_mode == True:
data_smpl_ran = args["rands"]
data_smpl_typ = args["data_smpl_type"]
df_class = args['mode_pres_cols_clas']
print("Sampling mode with {} samples".format(data_smpl_pop))
if data_smpl_typ == "gausian":
df = sample_gausian(df=df, cls=df_class, n = data_smpl_pop, r = data_smpl_ran)
elif data_smpl_typ == "stratified":
df = sample_stratified(df=df, cls=df_class, n = data_smpl_pop, r = data_smpl_ran)
else:
print("Complete mode with {} samples".format(data_smpl_pop))
return df
def dict_list(list, key_name, key_value, val_name):
for item in list:
if item[key_name]==key_value:
return item[val_name]
def detect_rows(df):
#Check Row Lenght
return len(df.index)
def detect_cols(df):
#Check Col Lenght
return len(df.columns)
def detect_shape(df):
#Check Shape
return df.shape
def detect_format(metric, value):
if metric == "time":
return "{}s".format(value)
elif metric == "accuracy":
return "{}%".format(value)
elif metric == "r2":
return "{}%".format(value)
def detect_sample(df, args):
#Sampling Method
args['data_smpl_mode'] = False
args['data_smpl_pops'] = 0
df = load_data(df, args)
#------------------------------------#
thrs_per = args["mode_pres_rows_thrs_per"]
thrs_min = args["mode_pres_rows_thrs_min"]
lens_alls = len(df)
lens_thrd = int(lens_alls * thrs_per)
#------------------------------------#
if thrs_per == -1: #Manual Mode - Complete Dataset
sample = {"smpl_mode": False, "smpl_pops": lens_alls }
elif thrs_per == 0:
sample = {"smpl_mode": True, "smpl_pops": thrs_min }
elif thrs_per >= 0:
if lens_alls <= thrs_min: #Automatic Mode - Dataset's length is smaller than sample ratio
sample = {"smpl_mode": False, "smpl_pops": lens_alls }
elif lens_alls > thrs_min: #Automatic Mode - Dataset's length is grater than sample ratio
sample = {"smpl_mode": True, "smpl_pops": lens_thrd }
return sample
def detect_types(df, args):
df_types = pd.DataFrame(df.dtypes).reset_index().rename(columns={"index": "feature_name", 0: "feature_orig"})
type_num = ["int16","int32","int64","float16","float32","float64"]
type_str = ["string", "object"]
type_cat = ["bool"]
def transform(feature_name, feature_orig):
if str.lower(str(feature_orig)) in type_num:
df_types_thes = args['mode_pres_ftrs_thrs_typ']
if (1.*df[feature_name].nunique()/df[feature_name].count() < df_types_thes): #or some other threshold
return ["Numeric", "Categorical"]
elif (1.*df[feature_name].nunique()/df[feature_name].count() >= df_types_thes): #or some other threshold
return | |
freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2CDF: [input, -3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, -3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.chi2QF: [input, 3] #[input, degrees of freedom]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############# TEST F DISTRIBUTION ########################
def testFDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fPDF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(1.5), 0.2682, places=3)
self.assertAlmostEqual(engine.action(2.0), 0.1568, places=3)
self.assertAlmostEqual(engine.action(10.0), 0.000614, places=4)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fCDF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.0), 0.0)
self.assertAlmostEqual(engine.action(0.1), 0.0200, places=3)
self.assertAlmostEqual(engine.action(0.9), 0.5006, places=3)
self.assertAlmostEqual(engine.action(4.0), 0.9657, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9999, places=3)
self.assertEqual( engine.action(-20.0), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 4, 10]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.0208, places=3)
self.assertAlmostEqual(engine.action(0.400), 0.7158, places=3)
self.assertAlmostEqual(engine.action(0.999), 11.282, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### check edge case handling ###
        # no real edge cases (doesn't act like a delta anywhere)
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fPDF: [input, 0, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fCDF: [input, 4, 0]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 0, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(0.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.fQF: [input, 4, 10]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## GAMMA DISTRIBUTION #####################
def testGammaDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaPDF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.0133, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0380, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0781, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaCDF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(3.000), 0.0803, places=3)
self.assertAlmostEqual(engine.action(6.000), 0.3233, places=3)
self.assertAlmostEqual(engine.action(10.00), 0.6472, places=3)
self.assertAlmostEqual(engine.action(100.0), 1.0000, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, 3.0, 3.0] #[input, shape, scale]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.5716, places=3)
self.assertAlmostEqual(engine.action(0.400), 6.8552, places=3)
self.assertAlmostEqual(engine.action(0.999), 33.687, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaPDF: [input, -1.3, -3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaCDF: [input, -3.0, 1.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, -1.0, 3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.gammaQF: [input, 2.0, 3.0] #[input, shape, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## BETA DISTRIBUTION #####################
def testBetaDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaPDF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.100), 0.0486, places=3)
self.assertAlmostEqual(engine.action(0.800), 1.2288, places=3)
self.assertAlmostEqual(engine.action(-20.0), 0.0000, places=3)
self.assertEqual( engine.action(9.000), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaCDF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.100), 0.0013, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.9842, places=3)
self.assertAlmostEqual(engine.action(4.000), 1.0000, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.0939, places=3)
self.assertAlmostEqual(engine.action(0.400), 0.5292, places=3)
self.assertAlmostEqual(engine.action(0.999), 0.9621, places=3)
self.assertEqual( engine.action(1.000), 1.0000)
### it must handle edge cases properly ###
## no real edge cases
### it must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaPDF: [input, 0, 3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaCDF: [input, 4, -3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, -4, 0] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.betaQF: [input, 4, 3] #[input, shape1, shape2]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## CAUCHY DISTRIBUTION #####################
def testCauchyDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyPDF: [input, 4, 3] #[input, location, scale]
''')
self.assertAlmostEqual(engine.action(-3.00), 0.0165, places=3)
self.assertAlmostEqual(engine.action(0.000), 0.0382, places=3)
self.assertAlmostEqual(engine.action(0.500), 0.0449, places=3)
self.assertAlmostEqual(engine.action(10.00), 0.0212, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyCDF: [input, 4, 3] #[input, location, scale]
''')
self.assertAlmostEqual(engine.action(0.000), 0.2048, places=3)
self.assertAlmostEqual(engine.action(0.100), 0.2087, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.2448, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(-20.0), 0.0396, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, 3] #[input, location, scale]
''')
self.assertEqual( engine.action(0.000), float('-inf'))
self.assertAlmostEqual(engine.action(0.001), -950.926, places=1)
self.assertAlmostEqual(engine.action(0.400), 3.0252, places=3)
self.assertAlmostEqual(engine.action(0.999), 958.926, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must handle edge cases ###
        ## the Cauchy distribution doesn't become a delta function when scale=0
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyPDF: [input, 4, -3] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyCDF: [input, 4, 0] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, -1] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.cauchyQF: [input, 4, 3] #[input, location, scale]
''')
self.assertRaises(PFAException, lambda: engine.action(1.4))
self.assertRaises(PFAException, lambda: engine.action(-.4))
############## LOGNORMAL DISTRIBUTION #####################
def testLogNormalDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalPDF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.0539, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0849, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0826, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalCDF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.900), 0.0176, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.2697, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9954, places=3)
self.assertEqual( engine.action(-20.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(0.001), 0.3361, places=3)
self.assertAlmostEqual(engine.action(0.400), 5.7354, places=3)
self.assertAlmostEqual(engine.action(0.999), 162.43, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalPDF: [input, 2.0, -3.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalCDF: [input, 2.0, 0.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, -1.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.lognormalQF: [input, 2.0, 1.0] #[input, meanlog, sdlog]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## STUDENTT DISTRIBUTION #####################
def testStudentTDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tPDF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertAlmostEqual(engine.action(-1.00), 0.1924, places=3)
self.assertAlmostEqual(engine.action(1.000), 0.1924, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.0680, places=3)
self.assertAlmostEqual(engine.action(4.000), 0.0131, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tCDF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertAlmostEqual(engine.action(-0.90), 0.2315, places=3)
self.assertAlmostEqual(engine.action(0.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(0.900), 0.7684, places=3)
self.assertAlmostEqual(engine.action(100.0), 0.9999, places=3)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertEqual( engine.action(0.000), float('-inf'))
self.assertAlmostEqual(engine.action(0.001), -22.33, places=2)
self.assertAlmostEqual(engine.action(0.400), -.2887, places=3)
self.assertAlmostEqual(engine.action(0.999), 22.327, places=2)
self.assertEqual( engine.action(1.000), float('inf'))
### must handle exceptions properly ###
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tPDF: [input, -2] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tCDF: [input, -1] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 0] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(.4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.tQF: [input, 2] #[input, degrees of freedom, noncentrality]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## BINOMIAL DISTRIBUTION #####################
def testBinomialDistribution(self):
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, 4, .4] #[input, size, prob]
''')
self.assertEqual( engine.action(0), 0.1296)
self.assertAlmostEqual(engine.action(1), 0.3456, places=3)
self.assertAlmostEqual(engine.action(2), 0.3456, places=3)
self.assertAlmostEqual(engine.action(10), 0.0000, places=3)
self.assertEqual( engine.action(-20), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, .4] #[input, size, prob]
''')
self.assertAlmostEqual(engine.action(0.0), 0.1296, places=3)
self.assertAlmostEqual(engine.action(2.0), 0.8208, places=3)
self.assertAlmostEqual(engine.action(2.5), 0.8208, places=3)
self.assertAlmostEqual(engine.action(10.0), 1.0000, places=3)
self.assertEqual( engine.action(-10.0), 0.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, .4] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 0.0)
self.assertEqual(engine.action(0.3), 1.0)
self.assertEqual(engine.action(0.5), 2.0)
self.assertEqual(engine.action(0.8), 2.0)
self.assertEqual(engine.action(1.0), 4.0)
### must handle edge cases properly ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0), 1.0)
self.assertEqual(engine.action(1), 0.0)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 1.0000)
self.assertEqual(engine.action(-1.0), 0.0000)
self.assertEqual(engine.action(2.0), 1.0000)
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, 0.0] #[input, size, prob]
''')
self.assertEqual(engine.action(0.0), 0.0000)
self.assertEqual(engine.action(0.3), 0.0000)
self.assertEqual(engine.action(1.0), 4.0000)
### must raise the right exceptions ###
engine, = PFAEngine.fromYaml('''
input: int
output: double
action:
- prob.dist.binomialPDF: [input, -4, 0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(5))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialCDF: [input, 4, 1.1] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(4))
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.binomialQF: [input, 4, 0.4] #[input, size, prob]
''')
self.assertRaises(PFAException, lambda: engine.action(-.4))
self.assertRaises(PFAException, lambda: engine.action(1.4))
############## UNIFORM DISTRIBUTION #####################
def testUniformDistribution(self):
engine, = PFAEngine.fromYaml('''
input: double
output: double
action:
- prob.dist.uniformPDF: [input, 1.0, 3.0] #[input, min, max]
''')
self.assertEqual( engine.action(0.000), 0.0000)
self.assertAlmostEqual(engine.action(1.000), 0.5000, places=3)
self.assertAlmostEqual(engine.action(2.000), 0.5000, | |
# template/plugin/file.py
#
# The Template-Python distribution is Copyright (C) <NAME> 2007-2008,
# derived from the Perl Template Toolkit Copyright (C) 1996-2007 Andy
# Wardley. All Rights Reserved.
#
# The file "LICENSE" at the top level of this source distribution describes
# the terms under which this file may be distributed.
#
import os
import re
try:
import pwd
import grp
except ImportError:
# Modules not available, probably because this is Windows:
pwd = grp = None
from template import util
from template.plugin import Plugin
from template.util import TemplateException
"""
template.plugin.file - Plugin providing information about files
SYNOPSIS
[% USE File(filepath) %]
[% File.path %] # full path
[% File.name %] # filename
[% File.dir %] # directory
DESCRIPTION
This plugin provides an abstraction of a file. It can be used to
fetch details about files from the file system, or to represent
abstract files (e.g. when creating an index page) that may or may not
exist on a file system.
A file name or path should be specified as a constructor argument. e.g.
[% USE File('foo.html') %]
[% USE File('foo/bar/baz.html') %]
[% USE File('/foo/bar/baz.html') %]
The file should exist on the current file system (unless 'nostat'
option set, see below) as an absolute file when specified with a
leading '/' as per '/foo/bar/baz.html', or otherwise as one relative
to the current working directory. The initializer performs a stat()
on the file and makes the 13 elements returned available as the plugin
items:
dev ino mode nlink uid gid rdev size
atime mtime ctime blksize blocks
e.g.
[% USE File('/foo/bar/baz.html') %]
[% File.mtime %]
[% File.mode %]
...
In addition, the 'user' and 'group' items are set to contain the user
and group names as returned by calls to getpwuid() and getgrgid() for
the file 'uid' and 'gid' elements, respectively. On Win32 platforms
on which getpwuid() and getgrgid() are not available, these values are
None.
[% USE File('/tmp/foo.html') %]
[% File.uid %] # e.g. 500
[% File.user %] # e.g. abw
This user/group lookup can be disabled by setting the 'noid' option.
[% USE File('/tmp/foo.html', noid=1) %]
[% File.uid %] # e.g. 500
[% File.user %] # nothing
The 'isdir' flag will be set if the file is a directory.
[% USE File('/tmp') %]
[% File.isdir %] # 1
If the stat() on the file fails (e.g. the file doesn't exist, bad
permissions, etc.) then the constructor will throw a 'File' exception.
This can be caught within a TRY...CATCH block.
[% TRY %]
[% USE File('/tmp/myfile') %]
File exists!
[% CATCH File %]
File error: [% error.info %]
[% END %]
Note the capitalisation of the exception type, 'File' to indicate an
error thrown by the 'File' plugin, to distinguish it from a regular
'file' exception thrown by the Template Toolkit.
Note that the 'File' plugin can also be referenced by the lower case
name 'file'. However, exceptions are always thrown of the 'File'
type, regardless of the capitalisation of the plugin name used.
[% USE file('foo.html') %]
[% file.mtime %]
As with any other Template Toolkit plugin, an alternate name can be
specified for the object created.
[% USE foo = file('foo.html') %]
[% foo.mtime %]
The 'nostat' option can be specified to prevent the plugin initializer
from performing a stat() on the file specified. In this case, the
File does not have to exist in the file system, no attempt will be
made to verify that it does, and no error will be thrown if it
doesn't. The entries for the items usually returned by stat() will be
set empty.
[% USE file('/some/where/over/the/rainbow.html', nostat=1) %]
[% file.mtime %] # nothing
All File plugins, regardless of the 'nostat' option, have a number of
items set relating to the original path specified.
* path
The full, original file path specified to the constructor.
[% USE file('/foo/bar.html') %]
[% file.path %] # /foo/bar.html
* name
The name of the file without any leading directories.
[% USE file('/foo/bar.html') %]
[% file.name %] # bar.html
* dir
The directory element of the path with the filename removed.
[% USE file('/foo/bar.html') %]
[% file.dir %]      # /foo
* ext
The file extension, if any, appearing at the end of the path following
a '.' (the '.' itself is not included in the extension).
[% USE file('/foo/bar.html') %]
[% file.ext %] # html
* home
This contains a string of the form '../..' to represent the upward path
from a file to its root directory.
[% USE file('bar.html') %]
[% file.home %] # nothing
[% USE file('foo/bar.html') %]
[% file.home %] # ..
[% USE file('foo/bar/baz.html') %]
[% file.home %] # ../..
* root
The 'root' item can be specified as a constructor argument, indicating
a root directory in which the named file resides. This is otherwise
set empty.
[% USE file('foo/bar.html', root='/tmp') %]
[% file.root %] # /tmp
* abs
This returns the absolute file path by constructing a path from the
'root' and 'path' options.
[% USE file('foo/bar.html', root='/tmp') %]
[% file.path %] # foo/bar.html
[% file.root %] # /tmp
[% File.abs %] # /tmp/foo/bar.html
In addition, the following method is provided:
* rel(path)
This returns a relative path from the current file to another path specified
as an argument. It is constructed by appending the path to the 'home'
item.
[% USE file('foo/bar/baz.html') %]
[% file.rel('wiz/waz.html') %] # ../../wiz/waz.html
EXAMPLES
[% USE file('/foo/bar/baz.html') %]
[% file.path %] # /foo/bar/baz.html
[% file.dir %] # /foo/bar
[% file.name %] # baz.html
[% file.home %] # ../..
[% file.root %] # ''
[% file.abs %] # /foo/bar/baz.html
[% file.ext %] # html
[% file.mtime %] # 987654321
[% file.atime %] # 987654321
[% file.uid %] # 500
[% file.user %] # abw
[% USE file('foo.html') %]
[% file.path %] # foo.html
[% file.dir %] # ''
[% file.name %] # foo.html
[% file.root %] # ''
[% file.home %] # ''
[% file.abs %] # foo.html
[% USE file('foo/bar/baz.html') %]
[% file.path %] # foo/bar/baz.html
[% file.dir %] # foo/bar
[% file.name %] # baz.html
[% file.root %] # ''
[% file.home %] # ../..
[% file.abs %] # foo/bar/baz.html
[% USE file('foo/bar/baz.html', root='/tmp') %]
[% file.path %] # foo/bar/baz.html
[% file.dir %] # foo/bar
[% file.name %] # baz.html
[% file.root %] # /tmp
[% file.home %] # ../..
[% file.abs %] # /tmp/foo/bar/baz.html
# calculate other file paths relative to this file and its root
[% USE file('foo/bar/baz.html', root => '/tmp/tt2') %]
[% file.path('baz/qux.html') %] # ../../baz/qux.html
[% file.dir('wiz/woz.html') %] # ../../wiz/woz.html
"""
STAT_KEYS = ("dev", "ino", "mode", "nlink", "uid", "gid", "rdev", "size",
"atime", "mtime", "ctime", "blksize", "blocks")
class File(Plugin):
"""Plugin for encapsulating information about a system file."""
def __init__(self, context, path, config=None):
"""Initialize a new File object.
Takes the pathname of the file as the argument following the
context and an optional dictionary of configuration parameters.
"""
if not isinstance(config, dict):
config = {}
if not path:
self.throw("no file specified")
if os.path.isabs(path):
root = ""
else:
root = config.get("root")
if root:
if root.endswith("/"):
root = root[:-1]
else:
root = ""
dir, name = os.path.split(path)
name, ext = util.unpack(re.split(r"(\.\w+)$", name), 2)
if ext is None:
ext = ""
if dir.endswith("/"):
dir = dir[:-1]
if dir == ".":
dir = ""
name = name + ext
if ext.startswith("."):
ext = ext[1:]
fields = splitpath(dir)
if fields and not fields[0]:
fields.pop(0)
home = "/".join(("..",) * len(fields))
abspath = os.path.join(root, path)
self.path = path
self.name = name
self.root = root
self.home = home
self.dir = dir
self.ext = ext
self.abs = abspath
self.user = ""
self.group = ""
self.isdir = ""
self.stat = config.get("stat") or not config.get("nostat")
if self.stat:
try:
stat = os.stat(abspath)
            except OSError as e:
self.throw("%s: %s" % (abspath, e))
for key in STAT_KEYS:
setattr(self, key, getattr(stat, "st_%s" % key, None))
if not config.get("noid"):
self.user = pwd and getpwuid(self.uid)
self.group = grp and getgrgid(self.gid)
self.isdir = os.path.isdir(abspath)
else:
for key in STAT_KEYS:
setattr(self, key, "")
def rel(self, path):
"""Generate a relative filename for some other file relative to this one.
"""
if isinstance(path, self.__class__):
path = path.path
if path.startswith("/"):
return path
elif not self.home:
return path
else:
return "%s/%s" % (self.home, path)
def throw(self, error):
raise TemplateException('File', error)
def splitpath(path):
def helper(path):
while True:
path, base = os.path.split(path)
if base:
yield base
else:
break
pathcomp = list(helper(path))
pathcomp.reverse()
return pathcomp
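# For example, splitpath("foo/bar/baz") returns ["foo", "bar", "baz"]; File.__init__
# above uses this to count directory levels when building the 'home' value ("../..").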
def getpwuid(uid):
try:
return pwd.getpwuid(uid).pw_name
except KeyError:
return uid
def getgrgid(gid):
try:
        return grp.getgrgid(gid).gr_name
    except KeyError:
        return gid
# datasets.py (from repository bell-bot/audio_adversarial_examples)
# -*- coding: future_fstrings -*-
import os
from re import L
from typing import Dict, Tuple
import sys
import logging
import numpy
import regex as re
# import torchaudio.datasets.tedlium as tedlium
import librosa
from Data import tedlium_local as tedlium
import torchaudio
from torch import Tensor
import pandas as pd
from utils import get_git_root
from Preprocessing.pre_processing import resample_audio
######### ------------------ PATHING ------------- ############
"""Specify path to TEDLIUM directory"""
data_paths = os.path.join(get_git_root(os.getcwd()) ,'Data')
DATASET_TEDLIUM_PATH = data_paths
DATASET_MLCOMMONS_PATH = data_paths
KEYWORDS_LINK_CSV_PATH = os.path.join(data_paths, "KeywordPerSample", "keywords.csv")
KEYPHRASES_LINK_CSV_PATH = os.path.join(data_paths, "Keyphrases" , "keyphrases.csv")
LABELS_KEYPHRASES_CSV_PATH = os.path.join(data_paths, "Keyphrases" , "labels.csv")
############# ---------CSV HEADERS --------------################
#TODO! Might be better to have a header called keyword_id, in order to take into account the different variations of keywords and phrases inside the same sample
class KeywordsCSVHeaders:
"""
    Represents the fields of the keywords.csv file
KEYWORD: The keyword linking the two audio files (sample of a TED audio file and an MSWC recording of that keyword)
TED_SAMPLE_ID: Represents the sample id of an audio. In other words, it is a unique id that maps to a segment of a TED audio file.
Hence, this is NOT the same as "talk_id", which represents the id of an entire audio file
TED_DATASET_TYPE: The type of dataset the sample exists in (Train vs Dev vs Test set)
MSWC_ID: The id of the keyword recording
"""
KEYWORD = "Keyword"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
MSWC_ID = "MSWC_AudioID"
CSV_header = [KEYWORD, TED_SAMPLE_ID, TED_DATASET_TYPE, MSWC_ID]
class KeyphrasesCSVHeaders:
KEYWORD = "Keyword"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
MSWC_ID = "MSWC_AudioID"
KEYWORD_ID = "Word_ID"
CSV_header = [KEYWORD, TED_SAMPLE_ID, TED_DATASET_TYPE, MSWC_ID, KEYWORD_ID]
class LabelsCSVHeaders:
"""
    Represents the fields of the labels.csv file
KEYWORD: The keyword linking the two audio files (sample of a TED audio file and an MSWC recording of that keyword)
TED_SAMPLE_ID: Represents the sample id of an audio. In other words, it is a unique id that maps to a segment of a TED audio file.
Hence, this is NOT the same as "talk_id", which represents the id of an entire audio file
TED_DATASET_TYPE: The type of dataset the sample exists in (Train vs Dev vs Test set)
    MSWC_ID: The id of the keyword recording
    TED_TALK_ID: The id of the whole TED talk recording the sample comes from
    START_TIMESTAMP / END_TIMESTAMP: Start and end times stored for the keyword occurrence
    CONFIDENCE: Confidence value stored with the label
"""
KEYWORD = "Keyword"
# Keyword_id = "Keyword_id"
TED_SAMPLE_ID= "TEDLIUM_SampleID"
TED_DATASET_TYPE = "TEDLIUM_SET"
TED_TALK_ID = "TED_TALK_ID"
MSWC_ID = "MSWC_AudioID"
START_TIMESTAMP = "start_time"
END_TIMESTAMP = "end_time"
CONFIDENCE = "confidence"
CSV_header = [KEYWORD, TED_SAMPLE_ID,TED_TALK_ID, TED_DATASET_TYPE, MSWC_ID, START_TIMESTAMP, END_TIMESTAMP, CONFIDENCE]
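# An illustrative labels.csv row following the header above (all values are made
# up): the keyword "science" from TEDLIUM train sample 1234 of talk "AlGore_2009",
# matched to MSWC clip "science/common_voice_en_000.opus", with timestamps 12.4s
# to 12.9s and confidence 0.87:
#
#   science,1234,AlGore_2009,train,science/common_voice_en_000.opus,12.4,12.9,0.87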
############# --------- DATASETS --------------################
#TODO! Customise for each subset, in speaker-adaptation. Might require changing the metadata
class TEDLIUMCustom(tedlium.TEDLIUM):
"""
Please have a directory with the TEDLIUM dataset downloaded (release-3).
Instance Variables:
self._path:
self._filelist:
self._dict_path:
self._phoneme_dict:
Additional Instance Variables:
self.train_audio_sets
self.dev_audio_sets
self.test_audio_sets
"""
def __init__(self, root=DATASET_TEDLIUM_PATH, release= "release3", subset=None):
super().__init__(root, release=release)
path_to_speaker_adaptation = os.path.join(root, tedlium._RELEASE_CONFIGS[release]["folder_in_archive"], "speaker-adaptation")
train_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "train.lst")))
dev_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "dev.lst")))
test_audio_sets = set(line.strip() for line in open(os.path.join(path_to_speaker_adaptation, "test.lst")))
self.recordings_set_dict = {
"train": train_audio_sets,
"dev": dev_audio_sets,
"test": test_audio_sets
}
def __len__(self) -> int:
"""Get number of items.
Returns:
int: TEDLIUM Dataset Length
"""
return super().__len__()
    def _load_audio(self, path: str, start_time: float, end_time: float, sample_rate: int = 16000, to_numpy=True) -> Tuple[Tensor, int]:
"""
        Returns audio data for a segment of a TED recording.
        Args:
            path, start_time, end_time, sample_rate: passed through to the parent class loader
            to_numpy: if True, return the waveform as a numpy array instead of a Tensor
        Returns:
            (waveform, sample_rate)
"""
waveform, sample_rate = super()._load_audio(path, start_time, end_time, sample_rate)
return (waveform.numpy(), sample_rate) if to_numpy else (waveform , sample_rate)
def __getitem__(self, sampleID: int) -> Dict:
"""Load the n-th sample from the dataset, where n is the audioFileID/fileSampleId
Please note that filesampleID is different from talk_id returned by the function, which denotes the entire recording instead
Args:
AudioFileID (int): The index of the sample to be loaded, which is also termed as the unique ID
Returns:
Dictionary: ``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier, start_time, end_time)``
"""
fileid, line = self._filelist[sampleID]
return self._load_tedlium_item(fileid, line, self._path)
def get_audio_file(self, sampleID:int):
fileid, line = self._filelist[sampleID]
return os.path.join(self._path, "sph", fileid)
def _load_tedlium_item(self, fileid: str, line: int, path: str) -> Dict:
"""Loads a TEDLIUM dataset sample given a file name and corresponding sentence name. Functionality taken from original source code.
----> Custom function returns start time and end time as well
Args:
fileid (str): File id to identify both text and audio files corresponding to the sample
line (int): Line identifier for the sample inside the text file
path (str): Dataset root path
Returns:
Dictionary
(Tensor, int, str, int, int, int):
``(waveform, sample_rate, transcript, talk_id, speaker_id, identifier, start_time, end_time)``
"""
transcript_path = os.path.join(path, "stm", fileid)
with open(transcript_path + ".stm") as f:
transcript = f.readlines()[line]
talk_id, _, speaker_id, start_time, end_time, identifier, transcript = transcript.split(" ", 6)
wave_path = os.path.join(path, "sph", fileid)
waveform, sample_rate = self._load_audio(wave_path + self._ext_audio, start_time=start_time, end_time=end_time)
results_dict = {
"waveform": waveform,
"sample_rate": sample_rate,
"transcript": transcript,
"talk_id": talk_id,
"speaker_id":speaker_id ,
"identifier": identifier ,
"start_time": float(start_time),
"end_time": float(end_time),
}
return results_dict
class MultiLingualSpokenWordsEnglish():
MLCOMMONS_FOLDER_NAME = "Multilingual_Spoken_Words"
AUDIO_DIR_NAME="audio"
SPLITS_DIR_NAME="splits"
ALIGNMENTS_DIR_NAME="alignments"
def raise_directory_error(self):
raise RuntimeError(
"Please configure the path to the Spoken Keywords Dataset, with the directory name \"{}\", containing the three subfolders:".format(self.MLCOMMONS_FOLDER_NAME) \
+ "\n" + \
"\"{}\" for audio, \"{}\" for splits directory, and \"{}\" for alignemnts directory".format(self.AUDIO_DIR_NAME,self.SPLITS_DIR_NAME,self.ALIGNMENTS_DIR_NAME)
)
#TODO! Accept 4 kinds of values: Train vs test vs Dev vs "all"
def __init__(self, root=DATASET_MLCOMMONS_PATH, read_splits_file=False, subset="train") -> None:
"""
Loads the MLCommons MultiLingual dataset (English version).
read_splits_file is used to generate the keywords csv file
"""
if self.MLCOMMONS_FOLDER_NAME not in os.listdir(root):
self.raise_directory_error()
self._path = os.path.join(root, self.MLCOMMONS_FOLDER_NAME)
#Initialise the folder names into dictionaries
self._subfolder_names_dict = {
"audio" : self.AUDIO_DIR_NAME,
"splits" : self.SPLITS_DIR_NAME,
"alignments": self.ALIGNMENTS_DIR_NAME,
}
#Check if all three subfolders are in the directory. Exit if they are not all there
current_subfolders = os.listdir(self._path)
if not all([subfolder_name in current_subfolders for subfolder_name in self._subfolder_names_dict.values()]):
self.raise_directory_error()
#Retrieve the splits csv file from MSWC folder
if read_splits_file:
self._path_to_splits = os.path.join(self._path, self._subfolder_names_dict["splits"])
self.splits_df = pd.read_csv(os.path.join(self._path_to_splits, "en_splits.csv"))
if subset == "train":
self.splits_df = self.splits_df[self.splits_df["SET"] == "TRAIN"]
elif subset == "dev":
self.splits_df = self.splits_df[self.splits_df["SET"] == "VALID"]
else:
self.splits_df = self.splits_df[self.splits_df["SET"] == "TEST"]
            #Extra step to preprocess words to one form of apostrophe
self.splits_df["WORD"].replace("`|’", "'", regex=True, inplace=True)
#Retrieve the words that have been validated as True, affirming that the spoken audio matches the transcription
self.splits_df = self.splits_df[self.splits_df["VALID"] == True]
#Retrieve the keywords in the dataset
self.keywords = set(self.splits_df["WORD"].unique())
def _load_audio(self, path_to_audio, to_numpy=True):
"""Loads audio data from file given file path
Returns:
waveform: Tensor / np.array
sample_rate: int
"""
# waveform, sample_rate = torchaudio.load(path_to_audio)
# return (waveform.numpy(), sample_rate) if to_numpy else (waveform , sample_rate)
waveform, sample_rate = librosa.load(path_to_audio)
return (waveform, sample_rate) if to_numpy else (waveform , sample_rate)
def __getitem__(self, MSWC_AudioID) -> Dict:
"""Retrieves sample data from file given Audio ID
"""
path_to_audio = os.path.join(self._path,self.AUDIO_DIR_NAME ,"en", "clips", MSWC_AudioID)
waveform, sample_rate= self._load_audio(path_to_audio)
results_dict = {
"waveform": waveform,
"sample_rate": sample_rate ,
"MSWC_AudioID": MSWC_AudioID
}
return results_dict
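# Illustrative use of the wrapper above (the clip id is made up; real ids come
# from the splits csv shipped with MSWC):
#
#     mswc = MultiLingualSpokenWordsEnglish(read_splits_file=False)
#     sample = mswc["science/common_voice_en_000.opus"]
#     waveform, sr = sample["waveform"], sample["sample_rate"]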
#TODO! Create mapping between talk ids and datatype set (i.e not just sample mapping). Use the defined train_audio_sets, dev_audio_sets, test_audio_sets to help. Might be better to implement this in the TEDLIUMCustom instead of here.
class CTRLF_DatasetWrapper:
COLS_OUTPUT= ['TED_waveform', 'TED_sample_rate', 'TED_transcript', 'TED_talk_id', 'TED_start_time', 'TED_end_time', 'MSWC_audio_waveform', 'MSWC_sample_rate', 'MSWC_ID', 'keyword', 'keyword_start_time', 'keyword_end_time', 'confidence']
"""
Main class wrapper for both TEDLIUM dataset and MSWC dataset. Using the labels csv file, use the functions to retrieve audio samples and their corresponding keywords that was linked to.
Args:
single_keywords_label: Represents a toggle which defines what types of labels we are dealing with.
------------> NOTE: This was added for the time being as handling of multiple keywords may require some changes in the implementation of the code here and elsewhere
"""
def __init__(self,path_to_labels_csv=LABELS_KEYPHRASES_CSV_PATH, path_to_TED=DATASET_TEDLIUM_PATH, path_to_MSWC=DATASET_MLCOMMONS_PATH, single_keywords_labels=True):
self._path_to_TED = path_to_TED
self._path_to_MSWC = path_to_MSWC
self.single_keywords_labels = single_keywords_labels
#Initialise keyword dataframe
self.labels_df = pd.read_csv(path_to_labels_csv)
#Initialise Ted talk dataset
self.TED = TEDLIUMCustom(root=path_to_TED,release="release3")
#Initialise Keyword dataset
self.MSWC = MultiLingualSpokenWordsEnglish(root=path_to_MSWC)
def get(self, TEDSample_id: int, sampling_rate=16000):
"""
        Given a TED sample ID, return a DataFrame linking the TED audio segment to its matching MSWC keyword clip(s).
Returns: DataFrame
Headers:
['TED_waveform', 'TED_sample_rate', 'TED_transcript', 'TED_talk_id', 'TED_start_time', 'TED_end_time', 'MSWC_audio_waveform', 'MSWC_sample_rate', 'MSWC_ID', 'keyword', 'keyword_start_time', 'keyword_end_time', 'confidence']
"""
output_df = pd.DataFrame(columns=self.COLS_OUTPUT)
TED_results_dict = self.TED.__getitem__(TEDSample_id)
        TEDSample_id = str(TEDSample_id) #TODO: Return pandas DataFrame
import json
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login as li, logout as lo
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.hashers import make_password
from django.contrib.auth.signals import user_login_failed
from django.forms.models import model_to_dict
from django.db import IntegrityError, InternalError
from django.http import JsonResponse, Http404
from django.shortcuts import render, HttpResponseRedirect, reverse, get_object_or_404
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
from django.contrib.sitemaps import Sitemap
from accounts.forms import (CreateBaseUserInstance, CreateIndividualProfileForm,
CreateCompanyNameInTripCreationForm, CreateCorporateProfileForm, ChangeProfilePictureForm,
ChangeProfileDescriptionForm, UpdateBaseUserInstance,
UpdateIndividualProfileForm, UpdatePrivacySettingsForm, ChangePasswordForm,
ResetPasswordByEmailForm, ResetPasswordByTokenForm, UpdateCorporateProfileContactForm,
UpdateCorporateProfileInformationForm, UpdateCompanyLogoForm, UpdateTravelInquiryForm)
from accounts.models import User, IndividualProfile, CompanyName, CorporateProfile, generate_short_uuid4
from jagdreisencheck.cryptography import (generate_password_reset_token, decode_password_reset_token)
from mailing.views import send_mail, validate_referral
from travelling.models import Trip, Rating
from inquiries.models import TripInquiry
# Create your views here.
def login(request):
template_name = 'accounts/login/login-page.html'
if request.method == 'POST':
username = request.POST.get('username')
password = request.POST.get('password')
nxt = request.POST.get('next')
if username and password:
user = authenticate(request, email=username, password=password)
if user is not None:
if not user.is_active:
messages.error(request, _('Your Account is inactive!'), extra_tags='danger')
return HttpResponseRedirect(reverse('accounts:user_login'))
else:
li(request, user)
if user.is_company:
nxt = reverse('accounts:console')
elif str(nxt) == str(reverse('accounts:user_login')):
nxt = '/'
return HttpResponseRedirect(nxt)
else:
user_login_failed.send(
sender=User,
request=request,
credentials={
'email': username
},
)
messages.error(request, _('An account with these credential does not exist. Please try again or reset '
'your password.'))
return HttpResponseRedirect(reverse('accounts:user_login'))
return render(request=request, context={}, template_name=template_name)
def logout(request):
lo(request)
return HttpResponseRedirect("/")
def register_user(request):
if request.user.is_authenticated and not settings.DEBUG and not eval(os.environ['testsystem']):
return HttpResponseRedirect("/")
template = 'accounts/registration/user-registration/base.html'
user_form = CreateBaseUserInstance
profile_form = CreateIndividualProfileForm
context = {
'user_form': user_form,
'profile_form': profile_form
}
if request.method == 'POST':
uform = user_form(request.POST)
pform = profile_form(request.POST, request.FILES)
context = {
'user_form': uform,
'profile_form': pform
}
if uform.is_valid():
if request.POST.get('password') == request.POST.get('confirm_passwd'):
uform = uform.save(commit=False)
                uform.password = make_password(request.POST.get('password'))
uform.is_company = False
if pform.is_valid():
uform.save()
pform = pform.save(commit=False)
pform.id = uform.id
pform.user = User.objects.get(pk=uform.pk)
pform.save()
ref_link = '{}{}{}{}{}'.format(reverse('accounts:validate_account'), '?token=',
uform.activation_key['token'].decode('utf-8'), '&email=',
uform.email)
if request.POST.get('next'):
ref_link += '&next={}'.format(request.POST.get('next'))
mctx = {
'headline': _('Welcome aboard!'),
'request': request,
'user': uform,
'token': uform.activation_key['token'].decode('utf-8'),
'link': ref_link
}
html_template = 'accounts/registration/user-registration/sign-up-email.html'
send_mail(subject=_('Verify your Account'), recipients=[uform.email], html_template=html_template,
context=mctx)
uform.referral_code = uform.pk
uform.save()
if request.POST.get('referred_by'):
try:
user = User.objects.get(referral_code=request.POST.get('referred_by'))
uform.referred_by = user.referral_code
uform.save()
                        except (User.DoesNotExist, IntegrityError, IndexError, User.MultipleObjectsReturned):
pass
return HttpResponseRedirect(reverse('accounts:thankyou'))
else:
uform.delete()
return render(request, template, context)
else:
uform.errors['password'] = _('The passwords do not match')
context['user_form'] = uform
return render(request, template, context)
return render(request, template, context)
def check_username_email(request):
email = False
if request.GET.get('email'):
email = request.GET.get('email')
try:
user = User.objects.get(email=email)
email = False
except User.DoesNotExist:
email = True
resp = json.dumps({'email': email})
return JsonResponse(resp, safe=False)
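# Note: the view above serialises the payload twice (json.dumps followed by
# JsonResponse), so the client receives a JSON-encoded string rather than an
# object and must parse the body a second time to get {"email": true/false}.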
def register_company(request):
if request.user.is_authenticated and not settings.DEBUG and not eval(os.environ['testsystem']):
return HttpResponseRedirect("/")
template = 'accounts/registration/company-registration/base.html'
user_form = CreateBaseUserInstance
company_form = CreateCompanyNameInTripCreationForm
profile_form = CreateCorporateProfileForm
context = {
'user_form': user_form,
'company_form': company_form,
'profile_form': profile_form
}
if request.method == 'POST':
uform = user_form(request.POST)
cform = company_form(request.POST)
pform = profile_form(request.POST)
context = {
'user_form': uform,
'company_form': cform,
'profile_form': pform,
}
if uform.is_valid():
if request.POST.get('password') == request.POST.get('confirm_passwd'):
uform = uform.save(commit=False)
                uform.password = make_password(request.POST.get('password'))
uform.is_company = True
try:
uform.save()
                except (IntegrityError, InternalError):
messages.error(request=request, message=_('The user already exists! Please try resetting your '
'password or drop us an e-mail'))
return render(request, template, context)
try:
instance = CompanyName.objects.get(name__iexact=request.POST.get('name'))
company_name = CreateCompanyNameInTripCreationForm(request.POST, instance=instance)
if instance.has_profile:
messages.error(request, message=_('This company already has a corporate profile. '
'If this is your company and you perceive an abuse contact us'
'immediately at <EMAIL>.'))
return HttpResponseRedirect("/")
if company_name.is_valid():
company_name = company_name.save(commit=False)
company_name.has_profile = True
company_name.save()
else:
messages.error(request, message=_('There is a problem with your data.'))
return render(request, template, context)
except CompanyName.DoesNotExist:
if cform.is_valid():
company_name = cform.save(commit=False)
company_name.id = uform.id
company_name.created_by = User.objects.get(id=uform.id)
company_name.has_profile = True
company_name.slug = slugify(company_name.name).replace("-", "_")
company_name.save()
else:
uform.delete()
return render(request, template, context)
if pform.is_valid():
profile_form = pform.save(commit=False)
profile_form.id = uform.id
profile_form.company_name = company_name
profile_form.admin = uform
profile_form.save()
uform.company_name = company_name.name
uform.save()
ref_link = '{}{}{}{}{}{}{}'.format(reverse('accounts:validate_account'), '?token=',
uform.activation_key['token'].decode('utf-8'), '&email=',
uform.email, '&next=', reverse('travelling:create_trip'))
mctx = {
'headline': _('Welcome aboard!'),
'request': request,
'user': uform,
'token': uform.activation_key['token'].decode('utf-8'),
'link': ref_link
}
html_template = 'accounts/registration/company-registration/sign-up-email.html'
send_mail(subject=_('Verify your Account'), recipients=[uform.email], html_template=html_template,
context=mctx)
messages.success(request=request,
message=_('Thank you for registering! Please check your e-Mails.'))
return HttpResponseRedirect("{}{}".format(reverse('accounts:thankyou'), "?corporate"))
else:
uform.delete()
company_name.has_profile = False
company_name.created_by = None
company_name.save()
return render(request, template, context)
else:
messages.error(request=request, message=_('The passwords do not match!'))
uform.errors['password'] = _("The passwords do not match")
return render(request, template, context)
return render(request, template, context)
@login_required
def create_company_name_in_trip(request):
if request.method == 'POST' and request.is_ajax():
form = CreateCompanyNameInTripCreationForm(request.POST)
try:
CompanyName.objects.get(name__iexact=request.POST.get("name"))
return JsonResponse(data={"msg": _("Company already exists!"), "errors": [_("Company already exists!")]})
except CompanyName.DoesNotExist:
pass
if form.is_valid():
form = form.save(commit=False)
form.id = generate_short_uuid4()
form.created_by = request.user
form.has_profile = False
form.slug = slugify(form.name).replace("-", "_")
form.save()
company = model_to_dict(form)
company['pk'] = form.pk
company = {
'pk': form.pk,
'name': form.name
}
return JsonResponse(data={
'msg': _('Company created successfully.'),
'company': company,
})
else:
return JsonResponse(data={
'msg': _('Error while creating company.'),
'errors': form.errors
})
else:
return JsonResponse(data={'msg': _('Forbidden request.')})
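# On success the AJAX endpoint above responds with
# {"msg": ..., "company": {"pk": <id>, "name": <name>}}; validation failures
# return {"msg": ..., "errors": {...}} and non-AJAX requests get
# {"msg": "Forbidden request."}.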
def profile_page(request, pk):
context = dict()
try:
user = User.objects.get(pk=pk)
if not request.user.is_authenticated:
messages.error(request=request,
message=_('Login required! Hunter profiles may only be viewed when logged in.'))
url = reverse('accounts:user_login')
url = '{}{}{}'.format(url, '?next=', request.path)
return HttpResponseRedirect(url)
except User.DoesNotExist:
try:
user = CorporateProfile.objects.get(company_name__slug=pk)
except CorporateProfile.DoesNotExist:
raise Http404(_("Does not exist"))
if user.company_name:
trips = Trip.objects.filter(company=user.company_name)
reviews = Rating.objects.filter(trip__company=user.company_name)
rate_count = 0
trip_count = 0
for trip in trips:
            if trip.overall_rating is None:
trip_overall_rating = 0
else:
trip_overall_rating = trip.overall_rating
if trip_overall_rating > 0:
rate_count += trip.overall_rating
trip_count += 1
trip_counter = trips.exclude(overall_rating__isnull=True).count()
if trips.count() > 0 and trip_counter > 0:
avg_company_rating = rate_count / trip_counter
else:
avg_company_rating = 0
quick_facts = {'avg_company_rating': avg_company_rating}
template = 'accounts/company/company-profile.html'
context['object'] = user
context['quick_facts'] = quick_facts
context['reviews'] = reviews
context['trips'] = trips
if request.user.pk == user.pk:
context['change_profile_pic_form'] = ChangeProfilePictureForm
context['change_profile_descr_form'] = ChangeProfileDescriptionForm
else:
profile = IndividualProfile.objects.get(pk=pk)
reviews = Rating.objects.filter(user=profile.user)
template = 'accounts/user/user-profile.html'
context['object'] = profile
context['reviews'] = reviews
return render(request, template, context)
class UpdateProfileView(View):
'''
    This view handles all user profile updates and manages the required forms.
    Allowed methods are GET and POST. The profile is exposed in the context both as 'object'
    (following Django convention) and as 'profile'. Required methods are `get` and `post`.
    :return: Configured view that is capable of handling all profile updates.
'''
http_method_names = ['get', 'post']
login_required = True
def _get_model(self):
try:
profile = IndividualProfile.objects.get(pk=self.request.user.pk)
except IndividualProfile.DoesNotExist:
profile = CorporateProfile.objects.get(pk=self.request.user.pk)
except CorporateProfile.DoesNotExist:
return HttpResponseRedirect("/")
return profile
def _get_forms(self):
forms = dict()
forms['base_form'] = UpdateBaseUserInstance(instance=self.request.user)
forms['password_form'] = ChangePasswordForm()
if self.request.user.is_company:
forms['profile_contact_form'] = UpdateCorporateProfileContactForm(instance=self._get_model())
forms['profile_info_form'] = UpdateCorporateProfileInformationForm(instance=self._get_model())
forms['profile_logo_form'] = UpdateCompanyLogoForm(instance=self._get_model().company_name)
else:
forms['profile_form'] = UpdateIndividualProfileForm(instance=self._get_model())
forms['profile_info_form'] = UpdatePrivacySettingsForm(instance=self._get_model())
return forms
def get_context_data(self):
context = dict()
context['object'] = self._get_model()
context['profile'] = self._get_model()
context.update(self._get_forms())
return context
def _get_template(self):
if self.request.user.is_company:
template_name = 'accounts/company/settings/corporate-settings.html'
else:
template_name = 'accounts/user/settings/individual-settings.html'
return template_name
def get(self, request, *args, **kwargs):
if not self.request.user.is_authenticated:
messages.error(request=request, message=_('Forbidden request.'))
return HttpResponseRedirect("/")
return render(request=self.request, context=self.get_context_data(), template_name=self._get_template())
def post(self, request, *args, **kwargs):
if not self.request.user.is_authenticated:
messages.error(request=request, message=_('Forbidden request.'))
return HttpResponseRedirect("/")
if not self.request.POST.get('form-name'):
return self.get(self.request)
context = self.get_context_data()
form_name = self.request.POST.get('form-name')
if form_name == 'base-user':
request = self.request.POST.copy()
request['email'] = self.request.user.email
form = UpdateBaseUserInstance(request, instance=self.request.user)
if form.is_valid():
form = form.save(commit=False)
form.save()
messages.success(request=self.request, message=_("User data updated successfully."))
else:
context.update({'base_form': form})
return render(self.request, self._get_template(), context)
elif form_name == 'public-settings':
form = UpdateIndividualProfileForm(request.POST, files=self.request.FILES, instance=self._get_model())
if form.is_valid():
form = form.save(commit=False)
form.save()
messages.success(request=self.request, message=_("Profile data updated successfully."))
else:
context.update({'profile_form': form})
return render(self.request, self._get_template(), context)
elif form_name == 'corporate-contact':
form = UpdateCorporateProfileContactForm(data=request.POST, files=self.request.FILES, instance=self._get_model())
if form.is_valid():
form = form.save(commit=False)
form.save()
messages.success(request=request, message=_("Contact information changed successfully."))
else:
context.update({'profile_contact_form': form})
return render(self.request, self._get_template(), context)
elif form_name == 'corporate-profile':
data = self.request.POST.copy()
data['operator_type'] = self._get_model().operator_type
form = UpdateCorporateProfileInformationForm(data, request.FILES, instance=self._get_model())
form2 = UpdateCompanyLogoForm(request.POST, request.FILES, instance=self._get_model().company_name)
if form.is_valid() and form2.is_valid():
form = form.save(commit=False)
form.save()
form2.save()
messages.success(request=request, message=_("Profile information updated successfully."))
else:
context.update({'profile_info_form': form})
return render(self.request, self._get_template(), context)
elif form_name == 'privacy-settings':
form = UpdatePrivacySettingsForm(self.request.POST, files=self.request.FILES, instance=self._get_model())
if form.is_valid():
form = form.save(commit=False)
form.save()
messages.success(request=self.request, message=_("Privacy settings updated successfully."))
else:
context.update({'profile_info_form': form})
return render(self.request, self._get_template(), context)
elif form_name == 'password-settings':
form = ChangePasswordForm(request.POST, instance=self.request.user)
old_password = self.request.user.password
if form.is_valid():
import ast
from threading import Thread
import sys
from queue import Queue, Empty
from subprocess import Popen, PIPE
import time
import datetime
import traceback
class PipeToJava:
def __init__(self, headless=True):
self.buffer = []
on_posix = 'posix' in sys.builtin_module_names
args = ['java', '-jar', '../BotTest.jar']
if not headless:
args.append("true")
self.p = Popen(args, stdin=PIPE, stdout=PIPE, bufsize=4096, close_fds=on_posix)
self.q = Queue()
self.t = Thread(target=self.enqueue_output, args=(self.p.stdout, self.q))
self.t.daemon = True
self.t.start()
def enqueue_output(self, out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
def get_buffer(self):
local_buffer = []
try:
while 1:
read = self.q.get_nowait()
local_buffer.append(read.strip().decode('latin-1'))
except Empty:
pass
for message in local_buffer:
if len(message.split(';')) > 5: # Shitty attempt to differentiate actual messages and debug prints
self.buffer.append(message)
return self.buffer
def remove_from_buffer(self, bot_id, message_id):
new_buffer = []
for entry in self.buffer:
if not ('{};{}'.format(bot_id, message_id) in entry):
new_buffer.append(entry)
self.buffer = new_buffer[:]
class Interface:
def __init__(self, bot, color=''):
self.bot = bot # type: Bot
self.pipe = self.bot.pipe # type: PipeToJava
self.current_id = 0
self.bank_info = {}
self.new_hunt_timer = 0
self.color = color
self.end_color = '\033[0m'
self.hdv_opended = False
def add_command(self, command, parameters=None):
# <botInstance>;<msgId>;<dest>;<msgType>;<command>;[param1, param2...]
message = '{};{};i;cmd;{};{}\r\n'.format(self.bot.id, self.current_id, command, parameters)
self.bot.llf.log(self.bot, '[Interface {}] Sending : {}'.format(self.bot.id, message.strip()))
self.current_id += 1
self.pipe.p.stdin.write(bytes(message, 'utf-8'))
self.pipe.p.stdin.flush()
return self.current_id-1
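    # For example, with bot id 3 and current_id 12, add_command("move", [397])
    # writes "3;12;i;cmd;move;[397]" to the Java process and returns 12, the
    # message id to pass to wait_for_return (the numbers are illustrative).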
def wait_for_return(self, message_id, timeout=5*60):
# print('[Interface] Waiting for response...')
ret_val = None
message_queue = []
start = time.time()
while ret_val is None and time.time()-start < timeout:
partial_message = '{};{};m;rtn'.format(self.bot.id, message_id)
buffer = self.pipe.get_buffer()
start = time.time() if self.bot.in_fight else start # prevent timeout if in fight
for message in buffer:
if int(message.split(';')[0]) == self.bot.id and message not in message_queue:
                    self.bot.llf.log(self.bot, '[Interface {}] Received : {}'.format(self.bot.id, message))
message_queue.append(message)
for message in message_queue:
if partial_message in message and not self.bot.in_fight:
ret_val = ast.literal_eval(message.split(';')[-1])
self.pipe.remove_from_buffer(self.bot.id, int(message.split(';')[1]))
del message_queue[message_queue.index(message)]
elif 'info;combat;["start"]' in message:
self.bot.llf.log(self.bot, '[Fight {}] Started'.format(self.bot.id))
start_fight = time.time()
self.bot.in_fight = True
self.pipe.remove_from_buffer(self.bot.id, int(message.split(';')[1]))
del message_queue[message_queue.index(message)]
elif 'info;combat;["end"]' in message:
self.bot.llf.log(self.bot, '[Fight {}] Ended in {} mins'.format(self.bot.id, round((time.time()-start_fight)/60, 1)))
self.bot.in_fight = False
self.pipe.remove_from_buffer(self.bot.id, int(message.split(';')[1]))
del message_queue[message_queue.index(message)]
self.get_player_stats()
elif 'info;disconnect;[True]' in message:
self.bot.llf.log(self.bot, '[Interface {}] Disconnected'.format(self.bot.id))
self.bot.connected = False
self.pipe.remove_from_buffer(self.bot.id, int(message.split(';')[1]))
del message_queue[message_queue.index(message)]
self.connect()
time.sleep(0.1)
if not self.bot.in_fight and ret_val is not None:
            # print('[Interface] Received : ', ret_val)
return tuple(ret_val)
else:
print('[Interface] Request timed out')
raise Exception('Request timed out')
def execute_command(self, command, parameters=None):
"""
Executes interface commands, logs errors
:param command: command to send
:param parameters: params for the command
:return: return value form interface
"""
try:
msg_id = self.add_command(command, parameters)
return self.wait_for_return(msg_id)
except Exception as e:
self.bot.llf.log(self.bot, '[Interface {}] ERROR : \n{}'.format(self.bot.id, e.args))
with open('../Utils/InterfaceErrors.txt', 'a') as f:
f.write('\n\n' + str(datetime.datetime.now()) + '\n')
f.write(traceback.format_exc())
time.sleep(60)
def connect(self, max_tries=5):
"""
Connects a bot instance
:return: Boolean/['Save']
"""
connection_param = [
self.bot.credentials['username'],
self.bot.credentials['password'],
self.bot.credentials['name'],
self.bot.credentials['server']
]
tries, banned = 0, False
while not self.bot.connected and tries < max_tries and not banned:
self.bot.occupation = 'Connecting'
self.bot.hf.update_db()
ret_val = self.execute_command('connect', connection_param)[0]
tries += 1
self.bot.connected = True if ret_val is True else False
banned = True if ret_val == 'Banned' else False
if self.bot.connected:
self.get_player_stats()
self.get_sub_left()
current_map, current_cell, current_worldmap, map_id = self.bot.interface.get_map()
self.bot.position = (current_map, current_worldmap)
dd_stats = self.get_dd_stat()
if dd_stats[0]:
self.bot.mount = 'equipped'
self.mount_dd()
if dd_stats[0] < 100:
self.set_dd_xp(90)
else:
self.set_dd_xp(0)
else:
self.bot.mount = self.bot.llf.get_mount_situation(self.bot.credentials['name'])
if self.bot.mount == 'resting':
self.bot.hf.fetch_bot_mobile()
return [True]
elif banned:
self.bot.llf.set_banned(self.bot.credentials['name'])
else:
time.sleep(max(15, tries*30))
return [False]
def disconnect(self):
"""
Disconnects the bot instance
:return:
"""
success = [False]
if self.bot.connected:
dd_stats = self.bot.interface.get_dd_stat()
if dd_stats[0]:
level, energy, idx = dd_stats
if energy < 1000:
self.bot.hf.drop_bot_mobile(idx)
success = [self.execute_command('disconnect')[0]]
if success[0]:
self.get_player_stats()
self.bot.llf.log(self.bot, '[Position {}] {}'.format(self.bot.id, 'OFFLINE'))
self.bot.connected = False
self.bot.occupation = 'Sleeping'
self.bot.hf.update_db()
self.bot.llf.push_log_file('../packetErrors.txt', 'PacketErrors')
return success
def get_map(self):
"""
Gets the map the player is on
:return: coords, cell, worldmap, mapID
"""
current_map, current_cell, current_worldmap, map_id = self.execute_command('getMap')
self.bot.position = (current_map, current_worldmap, current_cell)
self.bot.llf.log(self.bot, '[Position {}] {}'.format(self.bot.id, current_map))
return current_map, current_cell, current_worldmap, map_id
def move(self, cell):
"""
Moves the bot on a map
:param cell: target cell number
:return: Boolean
"""
return self.execute_command('move', [cell])
def change_map(self, cell, direction):
"""
Moves the bot to an adjacent map
:param cell: target cell number for map change
:param direction: cardnial direction as 'n', 's', 'w', 'e'
:return: Boolean
"""
return self.execute_command('changeMap', [cell, direction])
def get_map_resources(self):
"""
Gets the resources and their info for the map the player is on
:return: TODO
"""
return self.execute_command('getResources')
def get_player_stats(self):
"""
Get the bot player stats
:return: {"Weight": <>, "WeightMax": <>, "Lvl": <>, "Job": {"job_id": level, ...}}
"""
stats = self.execute_command('getStats')
stats = stats[0]
self.bot.inventory.kamas = stats['Inventory']['Kamas']
self.bot.inventory.items = stats['Inventory']['Items']
self.bot.characteristics.level = stats['Lvl']
self.bot.characteristics.health_percent = stats['Health']
self.bot.characteristics.xp = stats['Xp']
self.bot.characteristics.xp_next_level_floor = stats['XpNextLevelFloor']
self.bot.characteristics.weight = stats['Weight']
self.bot.characteristics.weight_max = stats['WeightMax']
self.bot.characteristics.jobs = stats['Job']
self.bot.characteristics.int = stats['Caracs']['Int']
self.bot.characteristics.agi = stats['Caracs']['Agi']
self.bot.characteristics.cha = stats['Caracs']['Cha']
self.bot.characteristics.fo = stats['Caracs']['Fo']
self.bot.characteristics.vi = stats['Caracs']['Vi']
self.bot.characteristics.sa = stats['Caracs']['Sa']
self.bot.characteristics.available_stat_points = stats['Caracs']['Available']
self.bot.inventory.equip_preferred_stuff()
if self.bot.characteristics.available_stat_points:
caracs_to_augment = self.bot.llf.get_caracs_to_augment(self.bot)
for carac in caracs_to_augment:
self.assign_carac_points(carac[0], carac[1])
return stats
def harvest_resource(self, cell):
"""
Harvests the resource on the cell given
:param cell: cell number
:return: [id, number_harvested, new_pods, max_pods], or combat or false
"""
ret_val = self.execute_command('harvest', [cell])
self.get_player_stats()
return ret_val
def move_harvest(self, cell_move, cell_resource):
"""
Moves to cell_move and harvests the resource on cell_resource
:param cell_move:
:param cell_resource:
:return:
"""
ret_val = self.execute_command('moveHarvest', [cell_move, cell_resource])
self.get_player_stats()
return ret_val
def go_to_astrub(self):
"""
Goes to Astrub and makes the player exit the building (should arrive at 6, -19, cell 397, worldmap 1)
:return: Boolean
"""
return self.execute_command('goAstrub')
def go_to_incarnam(self):
"""
Enters the building and uses the gate to go to Incarnam
:return: Boolean
"""
return self.execute_command('goIncarnam')
def get_bank_door(self):
return self.execute_command('getBankDoor')
def enter_bank(self):
"""
Enters the bank on the map if there is one
:return: Boolean
"""
bank_door_cell = self.get_bank_door()[0]
if bank_door_cell:
self.move(bank_door_cell)
return self.execute_command('goBank')
else:
return [False]
def open_bank(self):
"""
Opens bank
:return: items json / False
"""
bank_content = self.execute_command('openBank')
self.bank_info = bank_content[0]
return bank_content
def close_bank(self):
"""
Closes Bank
:return: Boolean
"""
self.bank_info = {}
return self.execute_command('closeBank')
def exit_bank(self):
"""
Exits bank
:return: Boolean
"""
banks = {
"(4, -18)": 409,
"(-31, -54)": 409,
"(-27, 35)": 409,
"(2, -2)": 410,
"(14, 25)": 480
}
return self.move(banks[str(self.bot.position[0])])
def drop_in_bank_list(self, item_id_list):
"""
Drops some items in bank
:param item_id_list: [ItemID1, ItemID2...] / ['all'] Ids are inventory ids
:return: New bank content, new inventory content
"""
if item_id_list in ['All', 'all']:
inventory_content, bank_content = self.execute_command('dropBankAll')
else:
inventory_content, bank_content = self.execute_command('dropBankList', item_id_list)
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def drop_in_bank_unique(self, item_id, quantity):
"""
Drops a certain quantity of a certain item in inventory
:param item_id: Item unique inventory id
:param quantity: quantity of item to drop
:return: New bank content, new inventory content
"""
inventory_content, bank_content = self.execute_command('dropBank', [item_id, quantity])
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def get_from_bank_list(self, item_id_list):
"""
Retrieves some items in bank
:param item_id_list: [ItemID1, ItemID2...] / ['All']
:return: New bank content, new inventory content
"""
inventory_content, bank_content = self.execute_command('getBankList', item_id_list)
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def get_from_bank_unique(self, item_id, quantity):
"""
Retrieves a certain quantity of a certain item in bank
:param item_id: Item unique inventory id
:param quantity: quantity of item to retrieve
:return: New bank content, new inventory content
"""
inventory_content, bank_content = self.execute_command('getBank', [item_id, quantity])
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def put_kamas_in_bank(self, quantity):
"""
Drops a specified quantity of kamas in bank
:param quantity: quantity of kamas to drop
:return: New bank content, new inventory content
"""
kamas = self.bot.inventory.kamas
if quantity in ['all', 'All'] or quantity > kamas:
quantity = kamas
inventory_content, bank_content = self.execute_command('dropBankKamas', [quantity])
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def get_kamas_from_bank(self, quantity):
"""
Retrieves a specified quantity of kamas from bank
:param quantity: quantity of kamas to drop
:return: New bank content, new inventory content
"""
kamas = self.bank_info['Kamas']
if quantity in ['all', 'All'] or quantity > kamas:
quantity = kamas
if quantity:
inventory_content, bank_content = self.execute_command('getBankKamas', [quantity])
self.bank_info = bank_content
self.get_player_stats()
return inventory_content, bank_content
def get_hunting_hall_door_cell(self):
"""
        Returns the cell id of the hunting hall door, or
#!/usr/bin/env python
# scripts/msct_register.py
#########################################################################################
# Various modules for registration.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: <NAME>, <NAME>
#
# License: see the LICENSE.TXT
#########################################################################################
# TODO: before running the PCA, correct for the "stretch" effect caused by curvature
# TODO: columnwise: check inverse field
# TODO: columnwise: add regularization: should not binarize at 0.5, especially problematic for edge (because division by zero to compute Sx, Sy).
# TODO: remove register2d_centermass and generalize register2d_centermassrot
# TODO: add flag for setting threshold on PCA
# TODO: clean code for generate_warping_field (unify with centermass_rot)
from __future__ import division, absolute_import
import sys, os, shutil, logging
from math import asin, cos, sin, acos
import numpy as np
from scipy import ndimage
from scipy.io import loadmat
from nibabel import load, Nifti1Image, save
from spinalcordtoolbox.image import Image
import sct_utils as sct
from sct_convert import convert
from sct_register_multimodal import Paramreg
logger = logging.getLogger(__name__)
def register_slicewise(fname_src,
fname_dest,
fname_mask='',
warp_forward_out='step0Warp.nii.gz',
warp_inverse_out='step0InverseWarp.nii.gz',
paramreg=None,
ants_registration_params=None,
path_qc='./',
remove_temp_files=0,
verbose=0):
im_and_seg = (paramreg.algo == 'centermassrot') and (paramreg.rot_method == 'hog') # bool for simplicity
# future contributor wanting to implement a method that use both im and seg will add: and (paramreg.rot_method == 'OTHER_METHOD')
if im_and_seg is True:
fname_src_im = fname_src[0]
fname_dest_im = fname_dest[0]
fname_src_seg = fname_src[1]
fname_dest_seg = fname_dest[1]
del fname_src
        del fname_dest # to be sure it is not misused later
# create temporary folder
path_tmp = sct.tmp_create(basename="register", verbose=verbose)
# copy data to temp folder
sct.printv('\nCopy input data to temp folder...', verbose)
if im_and_seg is False:
convert(fname_src, os.path.join(path_tmp, "src.nii"))
convert(fname_dest, os.path.join(path_tmp, "dest.nii"))
else:
convert(fname_src_im, os.path.join(path_tmp, "src_im.nii"))
convert(fname_dest_im, os.path.join(path_tmp, "dest_im.nii"))
convert(fname_src_seg, os.path.join(path_tmp, "src_seg.nii"))
convert(fname_dest_seg, os.path.join(path_tmp, "dest_seg.nii"))
if fname_mask != '':
convert(fname_mask, os.path.join(path_tmp, "mask.nii.gz"))
# go to temporary folder
curdir = os.getcwd()
os.chdir(path_tmp)
# Calculate displacement
if paramreg.algo == 'centermass':
# translation of center of mass between source and destination in voxel space
register2d_centermassrot('src.nii', 'dest.nii', fname_warp=warp_forward_out, fname_warp_inv=warp_inverse_out, rot=0, polydeg=int(paramreg.poly), path_qc=path_qc, verbose=verbose)
elif paramreg.algo == 'centermassrot':
if im_and_seg is False:
# translation of center of mass and rotation based on source and destination first eigenvectors from PCA.
register2d_centermassrot('src.nii', 'dest.nii', fname_warp=warp_forward_out, fname_warp_inv=warp_inverse_out, rot=1, polydeg=int(paramreg.poly), path_qc=path_qc, verbose=verbose, pca_eigenratio_th=float(paramreg.pca_eigenratio_th))
else:
# translation based of center of mass and rotation based on the symmetry of the image
register2d_centermassrot(['src_im.nii','src_seg.nii'], ['dest_im.nii', 'dest_seg.nii'], fname_warp=warp_forward_out,
fname_warp_inv=warp_inverse_out, rot=2, polydeg=int(paramreg.poly),
path_qc=path_qc, verbose=verbose)
elif paramreg.algo == 'columnwise':
# scaling R-L, then column-wise center of mass alignment and scaling
register2d_columnwise('src.nii', 'dest.nii', fname_warp=warp_forward_out, fname_warp_inv=warp_inverse_out, verbose=verbose, path_qc=path_qc, smoothWarpXY=int(paramreg.smoothWarpXY))
else:
# convert SCT flags into ANTs-compatible flags
algo_dic = {'translation': 'Translation', 'rigid': 'Rigid', 'affine': 'Affine', 'syn': 'SyN', 'bsplinesyn': 'BSplineSyN', 'centermass': 'centermass'}
paramreg.algo = algo_dic[paramreg.algo]
# run slicewise registration
register2d('src.nii', 'dest.nii', fname_mask=fname_mask, fname_warp=warp_forward_out, fname_warp_inv=warp_inverse_out, paramreg=paramreg, ants_registration_params=ants_registration_params, verbose=verbose)
sct.printv('\nMove warping fields...', verbose)
sct.copy(warp_forward_out, curdir)
sct.copy(warp_inverse_out, curdir)
# go back
os.chdir(curdir)
if remove_temp_files:
sct.rmtree(path_tmp, verbose=verbose)
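# A minimal call sketch (file names are illustrative; paramreg is assumed to be a
# Paramreg instance, imported above from sct_register_multimodal, configured with
# algo='centermassrot'):
#
#     register_slicewise('src_seg.nii.gz', 'dest_seg.nii.gz',
#                        warp_forward_out='step0Warp.nii.gz',
#                        warp_inverse_out='step0InverseWarp.nii.gz',
#                        paramreg=paramreg, verbose=1)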
def register2d_centermassrot(fname_src, fname_dest, fname_warp='warp_forward.nii.gz', fname_warp_inv='warp_inverse.nii.gz', rot=1, polydeg=0, path_qc='./', verbose=0, pca_eigenratio_th=1.6):
"""
Rotate the source image to match the orientation of the destination image, using the first and second eigenvector
of the PCA. This function should be used on segmentations (not images).
This works for 2D and 3D images. If 3D, it splits the image and performs the rotation slice-by-slice.
input:
fname_source: name of moving image (type: string), if rot == 2, this needs to be a list with the first element
being the image fname and the second the segmentation fname
fname_dest: name of fixed image (type: string), if rot == 2, needs to be a list
fname_warp: name of output 3d forward warping field
fname_warp_inv: name of output 3d inverse warping field
rot: estimate rotation with pca (=1), hog (=2) or no rotation (=0) Default = 1
Depending on the rotation method, input might be segmentation only or image and segmentation
polydeg: degree of polynomial regularization along z for rotation angle (type: int). 0: no regularization
verbose:
output:
none
"""
if rot == 2: # if following methods need im and seg, add "and rot == x"
fname_src_im = fname_src[0]
fname_dest_im = fname_dest[0]
fname_src_seg = fname_src[1]
fname_dest_seg = fname_dest[1]
del fname_src
        del fname_dest # to be sure it is not misused later
if verbose == 2:
import matplotlib
matplotlib.use('Agg') # prevent display figure
import matplotlib.pyplot as plt
# Get image dimensions and retrieve nz
sct.printv('\nGet image dimensions of destination image...', verbose)
if rot == 1 or rot == 0:
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest).dim
else:
nx, ny, nz, nt, px, py, pz, pt = Image(fname_dest_im).dim
sct.printv(' matrix size: ' + str(nx) + ' x ' + str(ny) + ' x ' + str(nz), verbose)
sct.printv(' voxel size: ' + str(px) + 'mm x ' + str(py) + 'mm x ' + str(pz) + 'mm', verbose)
if rot == 1 or rot == 0:
# Split source volume along z
sct.printv('\nSplit input volume...', verbose)
from sct_image import split_data
im_src = Image('src.nii')
split_source_list = split_data(im_src, 2)
for im in split_source_list:
im.save()
# Split destination volume along z
sct.printv('\nSplit destination volume...', verbose)
im_dest = Image('dest.nii')
split_dest_list = split_data(im_dest, 2)
for im in split_dest_list:
im.save()
# display image
data_src = im_src.data
data_dest = im_dest.data
if len(data_src.shape) == 2:
# reshape 2D data into pseudo 3D (only one slice)
new_shape = list(data_src.shape)
new_shape.append(1)
new_shape = tuple(new_shape)
data_src = data_src.reshape(new_shape)
data_dest = data_dest.reshape(new_shape)
elif rot == 2: # im and seg case
# Split source volume along z
sct.printv('\nSplit input volume...', verbose)
from sct_image import split_data
im_src_im = Image('src_im.nii')
split_source_list = split_data(im_src_im, 2)
for im in split_source_list:
im.save()
im_src_seg = Image('src_seg.nii')
split_source_list = split_data(im_src_seg, 2)
for im in split_source_list:
im.save()
# Split destination volume along z
sct.printv('\nSplit destination volume...', verbose)
im_dest_im = Image('dest_im.nii')
split_dest_list = split_data(im_dest_im, 2)
for im in split_dest_list:
im.save()
im_dest_seg = Image('dest_seg.nii')
split_dest_list = split_data(im_dest_seg, 2)
for im in split_dest_list:
im.save()
# display image
data_src_im = im_src_im.data
data_dest_im = im_dest_im.data
data_src_seg = im_src_seg.data
data_dest_seg = im_dest_seg.data
else:
raise ValueError("rot param == " + str(rot) + " not implemented")
# initialize displacement and rotation
coord_src = [None] * nz
pca_src = [None] * nz
coord_dest = [None] * nz
pca_dest = [None] * nz
centermass_src = np.zeros([nz, 2])
centermass_dest = np.zeros([nz, 2])
# displacement_forward = np.zeros([nz, 2])
# displacement_inverse = np.zeros([nz, 2])
angle_src_dest = np.zeros(nz)
z_nonzero = []
if rot == 1 or rot == 0:
# Loop across slices
for iz in range(0, nz):
try:
# compute PCA and get center of mass
coord_src[iz], pca_src[iz], centermass_src[iz, :] = compute_pca(data_src[:, :, iz])
coord_dest[iz], pca_dest[iz], centermass_dest[iz, :] = compute_pca(data_dest[:, :, iz])
# compute (src,dest) angle for first eigenvector
if rot == 1:
eigenv_src = pca_src[iz].components_.T[0][0], pca_src[iz].components_.T[1][0] # pca_src.components_.T[0]
eigenv_dest = pca_dest[iz].components_.T[0][0], pca_dest[iz].components_.T[1][0] # pca_dest.components_.T[0]
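# components_ rows are the principal axes, so the two indexed values above are the (x, y) components
# of the first principal axis (i.e. components_[0]) for the source and destination slices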
# Make sure first element is always positive (to prevent sign flipping)
if eigenv_src[0] <= 0:
eigenv_src = tuple([i * (-1) for i in eigenv_src])
if eigenv_dest[0] <= 0:
eigenv_dest = tuple([i * (-1) for i in eigenv_dest])
angle_src_dest[iz] = angle_between(eigenv_src, eigenv_dest)
# check that the ratio between the two eigenvalues is high enough; otherwise the angle estimate is not robust and is set to 0
if pca_src[iz].explained_variance_ratio_[0] / pca_src[iz].explained_variance_ratio_[1] < pca_eigenratio_th:
angle_src_dest[iz] = 0
if pca_dest[iz].explained_variance_ratio_[0] / pca_dest[iz].explained_variance_ratio_[1] < pca_eigenratio_th:
angle_src_dest[iz] = 0
# append to list of z_nonzero
z_nonzero.append(iz)
# if one of the slices is empty, ignore it
except ValueError:
sct.printv('WARNING: Slice #' + str(iz) + ' is empty. It will be ignored.', verbose, 'warning')
elif rot == 2: # im and seg case
raise NotImplementedError("This method is not implemented yet, it will be in a future version")
# for iz in range(0, nz):
# try:
# _, _, centermass_src[iz, :] = compute_pca(data_src_seg[:, :, iz])
# _, _, centermass_dest[iz, :] = compute_pca(data_dest_seg[:, :, iz])
#
# # TODO: Here will be put the new method to find the angle
#
# #angle_src = find_angle(data_src_im[:, :, iz], centermass_src[iz, :], parameters)
# #angle_dest = find_angle(data_dest_im[:, :, iz], centermass_dest[iz, :], parameters)
#
# # if (angle_src is None) or (angle_dest is None):
# # sct.printv('WARNING: Slice #' + str(iz) + ' no angle found in dest or src. It will be ignored.', verbose, 'warning')
# # continue
#
# # angle_src_dest[iz] = angle_src-angle_dest
#
# except
return self.getErrorItem("Value should be set")
value = properties[propertyName]
defaultValue = recommendedDefaults[propertyName]
if defaultValue is None:
return self.getErrorItem("Config's default value can't be null or undefined")
if not checkXmxValueFormat(value) and checkXmxValueFormat(defaultValue):
# Xmx is in the default-value but not the value, should be an error
return self.getErrorItem('Invalid value format')
if not checkXmxValueFormat(defaultValue):
# if default value does not contain Xmx, then there is no point in validating existing value
return None
valueInt = formatXmxSizeToBytes(getXmxSize(value))
defaultValueXmx = getXmxSize(defaultValue)
defaultValueInt = formatXmxSizeToBytes(defaultValueXmx)
if valueInt < defaultValueInt:
return self.getWarnItem("Value is less than the recommended default of -Xmx" + defaultValueXmx)
return None
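# Illustrative walk-through (hypothetical values, assuming getXmxSize pulls the size token out of the
# -Xmx flag and formatXmxSizeToBytes converts it to bytes):
#   value = "-Xmx2048m ...", recommended default = "-Xmx1024m ..." -> 2048m >= 1024m, returns None
#   value = "-Xmx512m ...",  recommended default = "-Xmx1024m ..." -> warning that the value is below -Xmx1024m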
def validateMapReduce2Configurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'mapreduce.map.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.map.java.opts')},
{"config-name": 'mapreduce.reduce.java.opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'mapreduce.reduce.java.opts')},
{"config-name": 'mapreduce.task.io.sort.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.task.io.sort.mb')},
{"config-name": 'mapreduce.map.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.map.memory.mb')},
{"config-name": 'mapreduce.reduce.memory.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'mapreduce.reduce.memory.mb')},
{"config-name": 'yarn.app.mapreduce.am.resource.mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.resource.mb')},
{"config-name": 'yarn.app.mapreduce.am.command-opts', "item": self.validateXmxValue(properties, recommendedDefaults, 'yarn.app.mapreduce.am.command-opts')},
{"config-name": 'mapreduce.job.queuename', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'mapreduce.job.queuename', services)} ]
return self.toConfigurationValidationProblems(validationItems, "mapred-site")
def validateYARNConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
clusterEnv = getSiteProperties(configurations, "cluster-env")
validationItems = [ {"config-name": 'yarn.nodemanager.resource.memory-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.nodemanager.resource.memory-mb')},
{"config-name": 'yarn.scheduler.minimum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.minimum-allocation-mb')},
{"config-name": 'yarn.nodemanager.linux-container-executor.group', "item": self.validatorEqualsPropertyItem(properties, "yarn.nodemanager.linux-container-executor.group", clusterEnv, "user_group")},
{"config-name": 'yarn.scheduler.maximum-allocation-mb', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'yarn.scheduler.maximum-allocation-mb')} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-site")
def validateYARNEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [{"config-name": 'service_check.queue.name', "item": self.validatorYarnQueue(properties, recommendedDefaults, 'service_check.queue.name', services)} ]
return self.toConfigurationValidationProblems(validationItems, "yarn-env")
def validateHbaseEnvConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
hbase_site = getSiteProperties(configurations, "hbase-site")
validationItems = [ {"config-name": 'hbase_regionserver_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_regionserver_heapsize')},
{"config-name": 'hbase_master_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'hbase_master_heapsize')},
{"config-name": "hbase_user", "item": self.validatorEqualsPropertyItem(properties, "hbase_user", hbase_site, "hbase.superuser")} ]
return self.toConfigurationValidationProblems(validationItems, "hbase-env")
def validateHDFSConfigurations(self, properties, recommendedDefaults, configurations, services, hosts):
clusterEnv = getSiteProperties(configurations, "cluster-env")
validationItems = [{"config-name": 'dfs.datanode.du.reserved', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'dfs.datanode.du.reserved')},
{"config-name": 'dfs.datanode.data.dir', "item": self.validatorOneDataDirPerPartition(properties, 'dfs.datanode.data.dir', services, hosts, clusterEnv)}]
return self.toConfigurationValidationProblems(validationItems, "hdfs-site")
def validateHDFSConfigurationsEnv(self, properties, recommendedDefaults, configurations, services, hosts):
validationItems = [ {"config-name": 'namenode_heapsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_heapsize')},
{"config-name": 'namenode_opt_newsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_newsize')},
{"config-name": 'namenode_opt_maxnewsize', "item": self.validatorLessThenDefaultValue(properties, recommendedDefaults, 'namenode_opt_maxnewsize')}]
return self.toConfigurationValidationProblems(validationItems, "hadoop-env")
def validatorOneDataDirPerPartition(self, properties, propertyName, services, hosts, clusterEnv):
if not propertyName in properties:
return self.getErrorItem("Value should be set")
dirs = properties[propertyName]
if not (clusterEnv and "one_dir_per_partition" in clusterEnv and clusterEnv["one_dir_per_partition"].lower() == "true"):
return None
dataNodeHosts = self.getDataNodeHosts(services, hosts)
warnings = set()
for host in dataNodeHosts:
hostName = host["Hosts"]["host_name"]
mountPoints = []
for diskInfo in host["Hosts"]["disk_info"]:
mountPoints.append(diskInfo["mountpoint"])
if get_mounts_with_multiple_data_dirs(mountPoints, dirs):
# A detailed message can be too long on large clusters:
# warnings.append("Host: " + hostName + "; Mount: " + mountPoint + "; Data directories: " + ", ".join(dirList))
warnings.add(hostName)
break
if len(warnings) > 0:
return self.getWarnItem("cluster-env/one_dir_per_partition is enabled but there are multiple data directories on the same mount. Affected hosts: {0}".format(", ".join(sorted(warnings))))
return None
"""
Returns the list of Data Node hosts.
"""
def getDataNodeHosts(self, services, hosts):
if len(hosts["items"]) > 0:
dataNodeHosts = self.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
if dataNodeHosts is not None:
return dataNodeHosts
return []
def getMastersWithMultipleInstances(self):
return ['ZOOKEEPER_SERVER', 'HBASE_MASTER']
def getNotValuableComponents(self):
return ['JOURNALNODE', 'ZKFC', 'GANGLIA_MONITOR']
def getNotPreferableOnServerComponents(self):
return ['GANGLIA_SERVER', 'METRICS_COLLECTOR']
def getCardinalitiesDict(self,host):
return {
'ZOOKEEPER_SERVER': {"min": 3},
'HBASE_MASTER': {"min": 1},
}
def getComponentLayoutSchemes(self):
return {
'NAMENODE': {"else": 0},
'SECONDARY_NAMENODE': {"else": 1},
'HBASE_MASTER': {6: 0, 31: 2, "else": 3},
'HISTORYSERVER': {31: 1, "else": 2},
'RESOURCEMANAGER': {31: 1, "else": 2},
'OOZIE_SERVER': {6: 1, 31: 2, "else": 3},
'HIVE_SERVER': {6: 1, 31: 2, "else": 4},
'HIVE_METASTORE': {6: 1, 31: 2, "else": 4},
'WEBHCAT_SERVER': {6: 1, 31: 2, "else": 4},
'METRICS_COLLECTOR': {3: 2, 6: 2, 31: 3, "else": 5},
}
def get_system_min_uid(self):
login_defs = '/etc/login.defs'
uid_min_tag = 'UID_MIN'
comment_tag = '#'
uid_min = uid_default = '1000'
uid = None
if os.path.exists(login_defs):
with open(login_defs, 'r') as f:
data = f.read().split('\n')
# look for uid_min_tag in file
uid = filter(lambda x: uid_min_tag in x, data)
# filter all lines, where uid_min_tag was found in comments
uid = filter(lambda x: x.find(comment_tag) > x.find(uid_min_tag) or x.find(comment_tag) == -1, uid)
if uid is not None and len(uid) > 0:
uid = uid[0]
comment = uid.find(comment_tag)
tag = uid.find(uid_min_tag)
if comment == -1:
uid_tag = tag + len(uid_min_tag)
uid_min = uid[uid_tag:].strip()
elif comment > tag:
uid_tag = tag + len(uid_min_tag)
uid_min = uid[uid_tag:comment].strip()
# check result for value
try:
int(uid_min)
except ValueError:
return uid_default
return uid_min
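# Illustrative /etc/login.defs lines and how they are handled (hypothetical sample):
#   "# UID_MIN 500"  -> skipped: the comment marker comes before the tag
#   "UID_MIN  1000"  -> parsed: uid_min = '1000'
#   "UID_MIN  abc"   -> fails the int() check, so the default '1000' is returned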
def mergeValidators(self, parentValidators, childValidators):
for service, configsDict in childValidators.iteritems():
if service not in parentValidators:
parentValidators[service] = {}
parentValidators[service].update(configsDict)
def checkSiteProperties(self, siteProperties, *propertyNames):
"""
Check if properties are defined in site properties.
:param siteProperties: config properties dict
:param *propertyNames: property names to validate
:returns: True if all properties are defined, otherwise False
"""
if siteProperties is None:
return False
for name in propertyNames:
if not (name in siteProperties):
return False
return True
"""
Returns the dictionary of configs for 'capacity-scheduler'.
"""
def getCapacitySchedulerProperties(self, services):
capacity_scheduler_properties = dict()
received_as_key_value_pair = True
if "capacity-scheduler" in services['configurations']:
if "capacity-scheduler" in services['configurations']["capacity-scheduler"]["properties"]:
cap_sched_props_as_str = services['configurations']["capacity-scheduler"]["properties"]["capacity-scheduler"]
if cap_sched_props_as_str:
cap_sched_props_as_str = str(cap_sched_props_as_str).split('\n')
if len(cap_sched_props_as_str) > 0 and cap_sched_props_as_str[0] != 'null':
# Received configs as one "\n" separated string
for property in cap_sched_props_as_str:
key, sep, value = property.partition("=")
capacity_scheduler_properties[key] = value
self.logger.info("'capacity-scheduler' configs is passed-in as a single '\\n' separated string. "
"count(services['configurations']['capacity-scheduler']['properties']['capacity-scheduler']) = "
"{0}".format(len(capacity_scheduler_properties)))
received_as_key_value_pair = False
else:
self.logger.info("Passed-in services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'] is 'null'.")
else:
self.logger.info("'capacity-schdeuler' configs not passed-in as single '\\n' string in "
"services['configurations']['capacity-scheduler']['properties']['capacity-scheduler'].")
if not capacity_scheduler_properties:
# Received configs as a dictionary (Generally on 1st invocation).
capacity_scheduler_properties = services['configurations']["capacity-scheduler"]["properties"]
self.logger.info("'capacity-scheduler' configs is passed-in as a dictionary. "
"count(services['configurations']['capacity-scheduler']['properties']) = {0}".format(len(capacity_scheduler_properties)))
else:
self.logger.error("Couldn't retrieve 'capacity-scheduler' from services.")
self.logger.info("Retrieved 'capacity-scheduler' received as dictionary : '{0}'. configs : {1}" \
.format(received_as_key_value_pair, capacity_scheduler_properties.items()))
return capacity_scheduler_properties, received_as_key_value_pair
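# The same configs can therefore arrive in two shapes (hypothetical values):
#   as one "\n"-joined string: {"capacity-scheduler": "yarn.scheduler.capacity.root.queues=default\n..."}
#     -> parsed line by line into a dict, received_as_key_value_pair = False
#   already as a dict: {"yarn.scheduler.capacity.root.queues": "default", ...}
#     -> returned as-is, received_as_key_value_pair stays True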
"""
Gets all YARN leaf queues.
"""
def getAllYarnLeafQueues(self, capacitySchedulerProperties):
config_list = capacitySchedulerProperties.keys()
yarn_queues = None
leafQueueNames = set()
if 'yarn.scheduler.capacity.root.queues' in config_list:
yarn_queues = capacitySchedulerProperties.get('yarn.scheduler.capacity.root.queues')
if yarn_queues:
toProcessQueues = yarn_queues.split(",")
while len(toProcessQueues) > 0:
queue = toProcessQueues.pop()
queueKey = "yarn.scheduler.capacity.root." + queue + ".queues"
if queueKey in capacitySchedulerProperties:
# If parent queue, add children
subQueues = capacitySchedulerProperties[queueKey].split(",")
for subQueue in subQueues:
toProcessQueues.append(queue + "." + subQueue)
else:
# Leaf queues
# We only take the leaf queue name instead of the complete path, as leaf queue names are unique in YARN.
# Eg: If YARN queues are like :
# (1). 'yarn.scheduler.capacity.root.a1.b1.c1.d1',
# (2). 'yarn.scheduler.capacity.root.a1.b1.c2',
# (3). 'yarn.scheduler.capacity.root.default',
# Added leaf queues names are as : d1, c2 and default for the 3 leaf queues.
leafQueuePathSplits = queue.split(".")
if len(leafQueuePathSplits) > 0:
leafQueueName = leafQueuePathSplits[-1]
leafQueueNames.add(leafQueueName)
return leafQueueNames
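# Example (hypothetical capacity-scheduler properties):
#   {'yarn.scheduler.capacity.root.queues': 'a1,default',
#    'yarn.scheduler.capacity.root.a1.queues': 'b1,c2',
#    'yarn.scheduler.capacity.root.a1.b1.queues': 'c1'}
#   -> returns the leaf names {'c1', 'c2', 'default'}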
def get_service_component_meta(self, service, component, services):
"""
Retrieves service component meta information as a dict from services.json.
If no service or component is found, an empty dict is returned.
Return value example:
"advertise_version" : true,
"bulk_commands_display_name" : "",
"bulk_commands_master_component_name" : "",
"cardinality" : "1+",
"component_category" : "CLIENT",
"component_name" : "HBASE_CLIENT",
"custom_commands" : [ ],
"decommission_allowed" : false,
"display_name" : "HBase Client",
"has_bulk_commands_definition" : false,
"is_client" : true,
"is_master" : false,
"reassign_allowed" : false,
"recovery_enabled" : false,
"service_name" : "HBASE",
"stack_name" : "HDP",
"stack_version" : "2.5",
"hostnames" : [ "host1", "host2" ]
:type service str
:type component str
:type services dict
:rtype dict
"""
__stack_services = "StackServices"
__stack_service_components = "StackServiceComponents"
if not services:
return {}
service_meta = [item for item in services["services"] if item[__stack_services]["service_name"] == service]
if len(service_meta) == 0:
return {}
service_meta = service_meta[0]
component_meta = [item for item in service_meta["components"] if item[__stack_service_components]["component_name"] == component]
if len(component_meta) == 0:
return {}
return component_meta[0][__stack_service_components]
def is_secured_cluster(self, services):
"""
Detects if cluster is secured or not
:type services dict
:rtype bool
"""
return services and "cluster-env" in services["configurations"] and\
"security_enabled" in services["configurations"]["cluster-env"]["properties"] and\
services["configurations"]["cluster-env"]["properties"]["security_enabled"].lower() == "true"
def get_services_list(self, services):
"""
Returns available services as list
:type services dict
:rtype list
"""
if not services:
return []
return [service["StackServices"]["service_name"] for service in services["services"]]
def get_components_list(self, service, services):
"""
Return list of components for specific service
:type service str
:type services dict
:rtype list
"""
__stack_services = "StackServices"
__stack_service_components = "StackServiceComponents"
if not services:
return []
service_meta = [item for item
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is local and global (<doctest test.test_syntax[0]>, line 1)
The tests all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name. As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[1]>", line 1
SyntaxError: cannot assign to None
>>> None = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[2]>", line 1
SyntaxError: cannot assign to None
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[3]>", line 1
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[4]>", line 1
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
File "<doctest test.test_syntax[5]>", line 1
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
File "<doctest test.test_syntax[6]>", line 1
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[7]>", line 1
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[8]>", line 1
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[8]>", line 1
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[10]>", line 1
SyntaxError: can't assign to repr
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
File "<doctest test.test_syntax[11]>", line 1
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
File "<doctest test.test_syntax[12]>", line 1
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
File "<doctest test.test_syntax[13]>", line 1
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[14]>", line 1
SyntaxError: cannot assign to None
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[15]>", line 1
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[16]>", line 1
SyntaxError: cannot assign to None
>>> def f(*None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[17]>", line 1
SyntaxError: cannot assign to None
>>> def f(**None):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[18]>", line 1
SyntaxError: cannot assign to None
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
File "<doctest test.test_syntax[19]>", line 1
SyntaxError: cannot assign to None
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
File "<doctest test.test_syntax[23]>", line 1
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
File "<doctest test.test_syntax[25]>", line 1
SyntaxError: more than 255 arguments
The actual error case counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[26]>", line 1
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
File "<doctest test.test_syntax[27]>", line 1
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
File "<doctest test.test_syntax[28]>", line 1
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[29]>", line 1
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
File "<doctest test.test_syntax[30]>", line 1
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
File "<doctest test.test_syntax[31]>", line 1
SyntaxError: can't assign to generator expression
>>>
library".format(showName), verbosity=self.logVerbosity)
currentShowValues = self.SearchTVLibrary(showName = showName)
if currentShowValues is None:
self._ActionDatabase("INSERT INTO TVLibrary (ShowName) VALUES (?)", (showName, ))
showID = self._ActionDatabase("SELECT (ShowID) FROM TVLibrary WHERE ShowName=?", (showName, ))[0][0]
return showID
else:
goodlogging.Log.Fatal("DB", "An entry for {0} already exists in the TV library".format(showName))
############################################################################
# UpdateShowDirInTVLibrary
############################################################################
def UpdateShowDirInTVLibrary(self, showID, showDir):
"""
Update show directory entry for given show id in TVLibrary table.
Parameters
----------
showID : int
Show id value.
showDir : string
Show directory name.
"""
goodlogging.Log.Info("DB", "Updating TV library for ShowID={0}: ShowDir={1}".format(showID, showDir))
self._ActionDatabase("UPDATE TVLibrary SET ShowDir=? WHERE ShowID=?", (showDir, showID))
############################################################################
# SearchTVLibrary
############################################################################
def SearchTVLibrary(self, showName = None, showID = None, showDir = None):
"""
Search TVLibrary table.
If none of the optional arguments are given it looks up all entries of the
table, otherwise it will look up entries which match the given arguments.
Note that it only looks up based on one argument - if show directory is
given this will be used, otherwise show id will be used if it is given,
otherwise show name will be used.
Parameters
----------
showName : string [optional : default = None]
Show name.
showID : int [optional : default = None]
Show id value.
showDir : string [optional : default = None]
Show directory name.
Returns
----------
list or None
If no result is found this returns None, otherwise it will return the
result of the SQL query as a list. In the case that the result is expected
to be unique and multiple entries are returned, a fatal error will be raised.
"""
unique = True
if showName is None and showID is None and showDir is None:
goodlogging.Log.Info("DB", "Looking up all items in TV library", verbosity=self.logVerbosity)
queryString = "SELECT * FROM TVLibrary"
queryTuple = None
unique = False
elif showDir is not None:
goodlogging.Log.Info("DB", "Looking up from TV library where ShowDir is {0}".format(showDir), verbosity=self.logVerbosity)
queryString = "SELECT * FROM TVLibrary WHERE ShowDir=?"
queryTuple = (showDir, )
elif showID is not None:
goodlogging.Log.Info("DB", "Looking up from TV library where ShowID is {0}".format(showID), verbosity=self.logVerbosity)
queryString = "SELECT * FROM TVLibrary WHERE ShowID=?"
queryTuple = (showID, )
elif showName is not None:
goodlogging.Log.Info("DB", "Looking up from TV library where ShowName is {0}".format(showName), verbosity=self.logVerbosity)
queryString = "SELECT * FROM TVLibrary WHERE ShowName=?"
queryTuple = (showName, )
result = self._ActionDatabase(queryString, queryTuple, error = False)
if result is None:
return None
elif len(result) == 0:
return None
elif len(result) == 1:
goodlogging.Log.Info("DB", "Found match in TVLibrary: {0}".format(result), verbosity=self.logVerbosity)
return result
elif len(result) > 1:
if unique is True:
goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in TV Library: {0}".format(result))
else:
goodlogging.Log.Info("DB", "Found multiple matches in TVLibrary: {0}".format(result), verbosity=self.logVerbosity)
return result
############################################################################
# SearchFileNameTable
############################################################################
def SearchFileNameTable(self, fileName):
"""
Search FileName table.
Find the show id for a given file name.
Parameters
----------
fileName : string
File name to look up in table.
Returns
----------
int or None
If a match is found in the database table the show id for this
entry is returned, otherwise this returns None.
"""
goodlogging.Log.Info("DB", "Looking up filename string '{0}' in database".format(fileName), verbosity=self.logVerbosity)
queryString = "SELECT ShowID FROM FileName WHERE FileName=?"
queryTuple = (fileName, )
result = self._ActionDatabase(queryString, queryTuple, error = False)
if result is None:
goodlogging.Log.Info("DB", "No match found in database for '{0}'".format(fileName), verbosity=self.logVerbosity)
return None
elif len(result) == 0:
return None
elif len(result) == 1:
goodlogging.Log.Info("DB", "Found file name match: {0}".format(result), verbosity=self.logVerbosity)
return result[0][0]
elif len(result) > 1:
goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(result))
############################################################################
# AddFileNameTable
############################################################################
def AddToFileNameTable(self, fileName, showID):
"""
Add entry to FileName table. If an entry for the given file name
already exists in the table a fatal error is raised.
Parameters
----------
fileName : string
File name.
showID : int
Show id.
"""
goodlogging.Log.Info("DB", "Adding filename string match '{0}'={1} to database".format(fileName, showID), verbosity=self.logVerbosity)
currentValues = self.SearchFileNameTable(fileName)
if currentValues is None:
self._ActionDatabase("INSERT INTO FileName (FileName, ShowID) VALUES (?,?)", (fileName, showID))
else:
goodlogging.Log.Fatal("DB", "An entry for '{0}' already exists in the FileName table".format(fileName))
############################################################################
# SearchSeasonDirTable
############################################################################
def SearchSeasonDirTable(self, showID, seasonNum):
"""
Search SeasonDir table.
Find the season directory for a given show id and season combination.
Parameters
----------
showID : int
Show id for given show.
seasonNum : int
Season number.
Returns
----------
string or None
If no match is found this returns None; if a single match is found
then the season directory name value is returned. If multiple matches
are found a fatal error is raised.
"""
goodlogging.Log.Info("DB", "Looking up directory for ShowID={0} Season={1} in database".format(showID, seasonNum), verbosity=self.logVerbosity)
queryString = "SELECT SeasonDir FROM SeasonDir WHERE ShowID=? AND Season=?"
queryTuple = (showID, seasonNum)
result = self._ActionDatabase(queryString, queryTuple, error = False)
if result is None:
goodlogging.Log.Info("DB", "No match found in database", verbosity=self.logVerbosity)
return None
elif len(result) == 0:
return None
elif len(result) == 1:
goodlogging.Log.Info("DB", "Found database match: {0}".format(result), verbosity=self.logVerbosity)
return result[0][0]
elif len(result) > 1:
goodlogging.Log.Fatal("DB", "Database corrupted - multiple matches found in database table for: {0}".format(result))
############################################################################
# AddSeasonDirTable
############################################################################
def AddSeasonDirTable(self, showID, seasonNum, seasonDir):
"""
Add entry to SeasonDir table. If a different entry for season directory
is found for the given show id and season number combination this raises
a fatal error.
Parameters
----------
showID : int
Show id.
seasonNum : int
Season number.
seasonDir : string
Season directory name.
"""
goodlogging.Log.Info("DB", "Adding season directory ({0}) to database for ShowID={1}, Season={2}".format(seasonDir, showID, seasonNum), verbosity=self.logVerbosity)
currentValue = self.SearchSeasonDirTable(showID, seasonNum)
if currentValue is None:
self._ActionDatabase("INSERT INTO SeasonDir (ShowID, Season, SeasonDir) VALUES (?,?,?)", (showID, seasonNum, seasonDir))
else:
if currentValue == seasonDir:
goodlogging.Log.Info("DB", "A matching entry already exists in the SeasonDir table", verbosity=self.logVerbosity)
else:
goodlogging.Log.Fatal("DB", "A different entry already exists in the SeasonDir table")
############################################################################
# _PrintDatabaseTable
############################################################################
def _PrintDatabaseTable(self, tableName, rowSelect = None):
"""
Prints contents of database table. An optional argument (rowSelect) can be
given which contains a list of column names and values against which to
search, allowing a subset of the table to be printed.
Gets database column headings using a PRAGMA call. Automatically adjusts
each column width based on the longest element that needs to be printed.
Parameters
----------
tableName : string
Name of table to print.
rowSelect : list of tuples
A list of column name and value pairs to search against.
Returns
----------
int
The number of table rows printed.
"""
goodlogging.Log.Info("DB", "{0}".format(tableName))
goodlogging.Log.IncreaseIndent()
tableInfo = self._ActionDatabase("PRAGMA table_info({0})".format(tableName))
dbQuery = "SELECT * FROM {0}".format(tableName)
dbQueryParams = []
if rowSelect is not None:
dbQuery = dbQuery + " WHERE " + ' AND '.join(['{0}=?'.format(i) for i, j in rowSelect])
dbQueryParams = [j for i, j in rowSelect]
tableData = self._ActionDatabase(dbQuery, dbQueryParams)
columnCount = len(tableInfo)
columnWidths = [0]*columnCount
columnHeadings = []
for count, column in enumerate(tableInfo):
columnHeadings.append(column[1])
columnWidths[count] = len(column[1])
for row in tableData:
for count, column in enumerate(row):
if len(str(column)) > columnWidths[count]:
columnWidths[count] = len(str(column))
printStr = "|"
for count, column in enumerate(columnWidths):
printStr = printStr + " {{0[{0}]:{1}}} |".format(count, columnWidths[count])
goodlogging.Log.Info("DB", printStr.format(columnHeadings))
goodlogging.Log.Info("DB", "-"*(sum(columnWidths)+3*len(columnWidths)+1))
for row in tableData:
noneReplacedRow = ['-' if i is None else i for i in row]
goodlogging.Log.Info("DB", printStr.format(noneReplacedRow))
goodlogging.Log.DecreaseIndent()
goodlogging.Log.NewLine()
return len(tableData)
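# Example usage (hypothetical table contents): print only rows for a single show
#   self._PrintDatabaseTable('TVLibrary', rowSelect=[('ShowName', 'Friends')])
# which builds "SELECT * FROM TVLibrary WHERE ShowName=?" with parameters ['Friends'].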
############################################################################
# PrintAllTables
############################################################################
def PrintAllTables(self):
""" Prints contents of every table. """
goodlogging.Log.Info("DB", "Database contents:\n")
for table in self._tableDict.keys():
self._PrintDatabaseTable(table)
############################################################################
# _UpdateDatabaseFromResponse
############################################################################
def _UpdateDatabaseFromResponse(self, response, mode):
"""
Update database table given a user input in the form
"TABLENAME COL1=VAL1 COL2=VAL2".
Either ADD or DELETE from table depending on mode argument.
If the change succeeds the updated table is printed to stdout.
Parameters
----------
response : string
User input.
mode : string
Valid values are 'ADD' or 'DEL'.
Returns
----------
None
Will always return None. There are numerous early returns in the cases
where the database update cannot proceed for any reason.
"""
# Get tableName from user input (form TABLENAME COL1=VAL1 COL2=VAL2 etc)
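# e.g. a hypothetical ADD request: response = "FileName FileName=show.s01e01.mkv ShowID=1", mode = "ADD"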
try:
tableName,
<gh_stars>0
from torch_struct import (
CKY,
CKY_CRF,
DepTree,
LinearChain,
SemiMarkov,
Alignment,
deptree_nonproj,
deptree_part,
)
from torch_struct import (
LogSemiring,
CheckpointSemiring,
CheckpointShardSemiring,
KMaxSemiring,
SparseMaxSemiring,
MaxSemiring,
StdSemiring,
EntropySemiring,
)
from .extensions import (
LinearChainTest,
SemiMarkovTest,
DepTreeTest,
CKYTest,
CKY_CRFTest,
test_lookup,
)
import torch
from hypothesis import given
from hypothesis.strategies import integers, data, sampled_from
import pytest
from hypothesis import settings
settings.register_profile("ci", max_examples=50, deadline=None)
settings.load_profile("ci")
smint = integers(min_value=2, max_value=4)
tint = integers(min_value=1, max_value=2)
lint = integers(min_value=2, max_value=10)
algorithms = {
"LinearChain": (LinearChain, LinearChainTest),
"SemiMarkov": (SemiMarkov, SemiMarkovTest),
"Dep": (DepTree, DepTreeTest),
"CKY_CRF": (CKY_CRF, CKY_CRFTest),
"CKY": (CKY, CKYTest),
}
class Gen:
"Helper class for tests"
def __init__(self, model_test, data, semiring):
model_test = algorithms[model_test]
self.data = data
self.model = model_test[0]
self.struct = self.model(semiring)
self.test = model_test[1]
self.vals, (self.batch, self.N) = data.draw(self.test.logpotentials())
# jitter
if not isinstance(self.vals, tuple):
self.vals = self.vals + 1e-6 * torch.rand(*self.vals.shape)
self.semiring = semiring
def enum(self, semiring=None):
return self.test.enumerate(
semiring if semiring is not None else self.semiring, self.vals
)
# Model specific tests.
@given(smint, smint, smint)
@settings(max_examples=50, deadline=None)
def test_linear_chain_counting(batch, N, C):
vals = torch.ones(batch, N, C, C)
semiring = StdSemiring
alpha = LinearChain(semiring).sum(vals)
c = pow(C, N + 1)
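# with all-ones potentials the partition function simply counts labelings: a chain of N transitions
# has N + 1 positions with C states each, i.e. C ** (N + 1) possible sequences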
assert (alpha == c).all()
# Semiring tests
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "Dep"])
@pytest.mark.parametrize("semiring", [LogSemiring, MaxSemiring])
def test_log_shapes(model_test, semiring, data):
gen = Gen(model_test, data, semiring)
alpha = gen.struct.sum(gen.vals)
count = gen.enum()[0]
assert alpha.shape[0] == gen.batch
assert count.shape[0] == gen.batch
assert alpha.shape == count.shape
assert torch.isclose(count[0], alpha[0])
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov"])
def test_entropy(model_test, data):
"Test entropy by manual enumeration"
gen = Gen(model_test, data, EntropySemiring)
alpha = gen.struct.sum(gen.vals)
log_z = gen.model(LogSemiring).sum(gen.vals)
log_probs = gen.enum(LogSemiring)[1]
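# brute-force reference: normalize the enumerated scores into log-probabilities (subtract log Z)
# and compute H = -sum_y p(y) * log p(y)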
log_probs = torch.stack(log_probs, dim=1) - log_z
entropy = -log_probs.mul(log_probs.exp()).sum(1).squeeze(0)
assert entropy.shape == alpha.shape
assert torch.isclose(entropy, alpha).all()
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain"])
def test_sparse_max(model_test, data):
gen = Gen(model_test, data, SparseMaxSemiring)
gen.vals.requires_grad_(True)
gen.struct.sum(gen.vals)
sparsemax = gen.struct.marginals(gen.vals)
sparsemax.sum().backward()
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "Dep"])
def test_kmax(model_test, data):
"Test out the k-max semiring"
K = 2
gen = Gen(model_test, data, KMaxSemiring(K))
max1 = gen.model(MaxSemiring).sum(gen.vals)
alpha = gen.struct.sum(gen.vals, _raw=True)
# the second-best score can be at most the max.
assert (alpha[0] == max1).all()
assert (alpha[1] <= max1).all()
topk = gen.struct.marginals(gen.vals, _raw=True)
argmax = gen.model(MaxSemiring).marginals(gen.vals)
# Argmax is different than 2-argmax
assert (topk[0] == argmax).all()
assert (topk[1] != topk[0]).any()
if model_test != "Dep":
log_probs = gen.enum(MaxSemiring)[1]
tops = torch.topk(torch.cat(log_probs, dim=0), 5, 0)[0]
assert torch.isclose(gen.struct.score(topk[1], gen.vals), alpha[1]).all()
for k in range(K):
assert (torch.isclose(alpha[k], tops[k])).all()
@given(data())
@pytest.mark.parametrize("model_test", ["CKY"])
@pytest.mark.parametrize("semiring", [LogSemiring, MaxSemiring])
def test_cky(model_test, semiring, data):
gen = Gen(model_test, data, semiring)
alpha = gen.struct.sum(gen.vals)
count = gen.enum()[0]
assert alpha.shape[0] == gen.batch
assert count.shape[0] == gen.batch
assert alpha.shape == count.shape
assert torch.isclose(count[0], alpha[0])
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "CKY_CRF", "Dep"])
def test_max(model_test, data):
"Test that argmax score is the same as max"
gen = Gen(model_test, data, MaxSemiring)
score = gen.struct.sum(gen.vals)
marginals = gen.struct.marginals(gen.vals)
assert torch.isclose(score, gen.struct.score(gen.vals, marginals)).all()
@given(data())
@pytest.mark.parametrize("semiring", [LogSemiring, MaxSemiring])
@pytest.mark.parametrize("model_test", ["Dep"])
def test_labeled_proj_deptree(model_test, semiring, data):
gen = Gen(model_test, data, semiring)
arc_scores = torch.rand(3, 5, 5, 7)
gen.vals = semiring.sum(arc_scores)
count = gen.enum()[0]
alpha = gen.struct.sum(arc_scores)
assert torch.isclose(count, alpha).all()
struct = gen.model(MaxSemiring)
max_score = struct.sum(arc_scores)
argmax = struct.marginals(arc_scores)
assert torch.isclose(max_score, struct.score(arc_scores, argmax)).all()
# todo: add CKY, DepTree too?
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "Dep", "CKY_CRF"])
def test_parts_from_marginals(model_test, data):
gen = Gen(model_test, data, MaxSemiring)
edge = gen.struct.marginals(gen.vals).long()
sequence, extra = gen.model.from_parts(edge)
edge_ = gen.model.to_parts(sequence, extra)
assert (torch.isclose(edge, edge_)).all(), edge - edge_
sequence_, extra_ = gen.model.from_parts(edge_)
assert extra == extra_, (extra, extra_)
assert (torch.isclose(sequence, sequence_)).all(), sequence - sequence_
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov"])
def test_parts_from_sequence(model_test, data):
gen = Gen(model_test, data, LogSemiring)
C = gen.vals.size(-1)
if isinstance(gen.struct, LinearChain):
K = 2
background = 0
extra = C
elif isinstance(gen.struct, SemiMarkov):
K = gen.vals.size(-3)
background = -1
extra = C, K
else:
raise NotImplementedError()
sequence = torch.full((gen.batch, gen.N), background, dtype=int)
for b in range(gen.batch):
i = 0
while i < gen.N:
symbol = torch.randint(0, C, (1,)).item()
sequence[b, i] = symbol
length = torch.randint(1, K, (1,)).item()
i += length
edge = gen.model.to_parts(sequence, extra)
sequence_, extra_ = gen.model.from_parts(edge)
assert extra == extra_, (extra, extra_)
assert (torch.isclose(sequence, sequence_)).all(), sequence - sequence_
edge_ = gen.model.to_parts(sequence_, extra_)
assert (torch.isclose(edge, edge_)).all(), edge - edge_
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "CKY_CRF", "Dep"])
def test_generic_lengths(model_test, data):
gen = Gen(model_test, data, LogSemiring)
model, struct, vals, N, batch = gen.model, gen.struct, gen.vals, gen.N, gen.batch
lengths = torch.tensor(
[data.draw(integers(min_value=2, max_value=N)) for b in range(batch - 1)] + [N]
)
m = model(MaxSemiring).marginals(vals, lengths=lengths)
maxes = struct.score(vals, m)
part = model().sum(vals, lengths=lengths)
# Check that max is correct
assert (maxes <= part + 1e-3).all()
m_part = model(MaxSemiring).sum(vals, lengths=lengths)
assert (torch.isclose(maxes, m_part)).all(), maxes - m_part
if model == CKY:
return
seqs, extra = struct.from_parts(m)
full = struct.to_parts(seqs, extra, lengths=lengths)
assert (full == m.type_as(full)).all(), "%s %s %s" % (
full.shape,
m.shape,
(full - m.type_as(full)).nonzero(),
)
@given(data())
@pytest.mark.parametrize(
"model_test", ["LinearChain", "SemiMarkov", "Dep", "CKY", "CKY_CRF"]
)
def test_params(model_test, data):
gen = Gen(model_test, data, LogSemiring)
_, struct, vals, _, _ = gen.model, gen.struct, gen.vals, gen.N, gen.batch
if isinstance(vals, tuple):
vals = tuple((v.requires_grad_(True) for v in vals))
else:
vals.requires_grad_(True)
alpha = struct.sum(vals)
alpha.sum().backward()
@given(data())
@pytest.mark.parametrize("model_test", ["LinearChain", "SemiMarkov", "Dep"])
def test_gumbel(model_test, data):
gen = Gen(model_test, data, LogSemiring)
gen.vals.requires_grad_(True)
alpha = gen.struct.marginals(gen.vals)
print(alpha[0])
print(torch.autograd.grad(alpha, gen.vals, alpha.detach())[0][0])
def test_hmm():
C, V, batch, N = 5, 20, 2, 5
transition = torch.rand(C, C)
emission = torch.rand(V, C)
init = torch.rand(C)
observations = torch.randint(0, V, (batch, N))
out = LinearChain.hmm(transition, emission, init, observations)
LinearChain().sum(out)
def test_sparse_max2():
print(LinearChain(SparseMaxSemiring).sum(torch.rand(1, 8, 3, 3)))
print(LinearChain(SparseMaxSemiring).marginals(torch.rand(1, 8, 3, 3)))
# assert(False)
def test_lc_custom():
model = LinearChain
vals, _ = model._rand()
struct = LinearChain(LogSemiring)
marginals = struct.marginals(vals)
s = struct.sum(vals)
struct = LinearChain(CheckpointSemiring(LogSemiring, 1))
marginals2 = struct.marginals(vals)
s2 = struct.sum(vals)
assert torch.isclose(s, s2).all()
assert torch.isclose(marginals, marginals2).all()
struct = LinearChain(CheckpointShardSemiring(LogSemiring, 1))
marginals2 = struct.marginals(vals)
s2 = struct.sum(vals)
assert torch.isclose(s, s2).all()
assert torch.isclose(marginals, marginals2).all()
# struct = LinearChain(LogMemSemiring)
# marginals2 = struct.marginals(vals)
# s2 = struct.sum(vals)
# assert torch.isclose(s, s2).all()
# assert torch.isclose(marginals, marginals).all()
# struct = LinearChain(LogMemSemiring)
# marginals = struct.marginals(vals)
# s = struct.sum(vals)
# struct = LinearChain(LogSemiringKO)
# marginals2 = struct.marginals(vals)
# s2 = struct.sum(vals)
# assert torch.isclose(s, s2).all()
# assert torch.isclose(marginals, marginals).all()
# print(marginals)
# print(marginals2)
# struct = LinearChain(LogSemiring)
# marginals = struct.marginals(vals)
# s = struct.sum(vals)
# struct = LinearChain(LogSemiringKO)
# marginals2 = struct.marginals(vals)
# s2 = struct.sum(vals)
# assert torch.isclose(s, s2).all()
# print(marginals)
# print(marginals2)
# struct = LinearChain(MaxSemiring)
# marginals = struct.marginals(vals)
# s = struct.sum(vals)
# struct = LinearChain(MaxSemiringKO)
# marginals2 = struct.marginals(vals)
# s2 = struct.sum(vals)
# assert torch.isclose(s, s2).all()
# assert torch.isclose(marginals, marginals2).all()
@given(data())
@pytest.mark.parametrize("model_test", ["Dep"])
@pytest.mark.parametrize("semiring", [LogSemiring])
def test_non_proj(model_test, semiring, data):
gen = Gen(model_test, data, semiring)
alpha = deptree_part(gen.vals, False)
count = gen.test.enumerate(LogSemiring, gen.vals, non_proj=True, multi_root=False)[
0
]
assert alpha.shape[0] == gen.batch
assert count.shape[0] == gen.batch
assert alpha.shape == count.shape
# assert torch.isclose(count[0], alpha[0], 1e-2)
alpha = deptree_part(gen.vals, True)
count = gen.test.enumerate(LogSemiring, gen.vals, non_proj=True, multi_root=True)[0]
assert alpha.shape[0] == gen.batch
assert count.shape[0] == gen.batch
assert alpha.shape == count.shape
# assert torch.isclose(count[0], alpha[0], 1e-2)
marginals = deptree_nonproj(gen.vals, multi_root=False)
print(marginals.sum(1))
marginals = deptree_nonproj(gen.vals, multi_root=True)
print(marginals.sum(1))
# # assert(False)
# # vals, _ = model._rand()
# # struct = model(MaxSemiring)
# # score = struct.sum(vals)
# # marginals = struct.marginals(vals)
# # assert torch.isclose(score, struct.score(vals, marginals)).all()
@given(data())
@settings(max_examples=50, deadline=None)
def ignore_alignment(data):
# log_potentials = torch.ones(2, 2, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
# print("FINAL", v)
# log_potentials = torch.ones(2, 3, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
# print("FINAL", v)
# log_potentials = torch.ones(2, 6, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
# print("FINAL", v)
# log_potentials = torch.ones(2, 7, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
# print("FINAL", v)
# log_potentials = torch.ones(2, 8, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
# print("FINAL", v)
# assert False
# model = data.draw(sampled_from([Alignment]))
# semiring = data.draw(sampled_from([StdSemiring]))
# struct = model(semiring)
# vals, (batch, N) = model._rand()
# print(batch, N)
# struct = model(semiring)
# # , max_gap=max(3, abs(vals.shape[1] - vals.shape[2]) + 1))
# vals.fill_(1)
# alpha = struct.sum(vals)
model = data.draw(sampled_from([Alignment]))
semiring = data.draw(sampled_from([StdSemiring]))
test = test_lookup[model](semiring)
struct = model(semiring, sparse_rounds=10)
vals, (batch, N) = test._rand()
alpha = struct.sum(vals)
count = test.enumerate(vals)[0]
assert torch.isclose(count, alpha).all()
model = data.draw(sampled_from([Alignment]))
semiring = data.draw(sampled_from([LogSemiring]))
struct = model(semiring, sparse_rounds=10)
vals, (batch, N) = model._rand()
alpha = struct.sum(vals)
count = test_lookup[model](semiring).enumerate(vals)[0]
assert torch.isclose(count, alpha).all()
# model = data.draw(sampled_from([Alignment]))
# semiring = data.draw(sampled_from([MaxSemiring]))
# struct = model(semiring)
# log_potentials = torch.ones(2, 2, 2, 3)
# v = Alignment(StdSemiring).sum(log_potentials)
log_potentials = torch.ones(2, 2, 8, 3)
v = Alignment(MaxSemiring).sum(log_potentials)
# print(v)
# assert False
m = Alignment(MaxSemiring).marginals(log_potentials)
score = Alignment(MaxSemiring).score(log_potentials, m)
assert torch.isclose(v, score).all()
semiring | |
<filename>03_conv_nets/3-1_introduction/lesson_1_37_CNN_for_CIFAR.py
### Convolutional Neural Networks
'''
In this notebook, we train a CNN to classify images from the CIFAR-10 database.
The images in this database are small color images that fall into one of ten classes; some example images are pictured below.
'''
'''
Test for CUDA
Since these are larger (32x32x3) images, it may prove useful to speed up your training time by using a GPU. CUDA is a parallel computing platform and CUDA Tensors are the same as typical Tensors, only they utilize GPUs for computation.
'''
import torch
import numpy as np
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
'''
Load the Data
Downloading may take a minute. We load in the training and test data, split the training data into a training and validation set, then create DataLoaders for each of these sets of data.
'''
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# percentage of training set to use as validation
valid_size = 0.2
# convert data to a normalized torch.FloatTensor
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
##### alternatively with data augmentation
transform_aug = transforms.Compose([
transforms.RandomHorizontalFlip(), # randomly flip and rotate
transforms.RandomRotation(10),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# choose the training and test datasets
train_data = datasets.CIFAR10('data', train=True,
download=True, transform=transform_aug)
test_data = datasets.CIFAR10('data', train=False,
download=True, transform=transform_test)
# obtain training indices that will be used for validation
num_train = len(train_data)
indices = list(range(num_train))
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
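# the first `split` shuffled indices (20% of the data) become the validation set; the rest are used for training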
# define samplers for obtaining training and validation batches
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# prepare data loaders (combine dataset and sampler)
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
sampler=valid_sampler, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
# specify the image classes
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
#### Visualize a Batch of Training Data
import matplotlib.pyplot as plt
#%matplotlib inline
# helper function to un-normalize and display an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
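# Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) maps pixel values from [0, 1] to [-1, 1];
# dividing by 2 and adding 0.5 inverts that so the image displays correctly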
plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy() # convert images to numpy for display
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
# display 20 images
for idx in np.arange(20):
ax = fig.add_subplot(2, int(20/2), idx+1, xticks=[], yticks=[])
imshow(images[idx])
ax.set_title(classes[labels[idx]])
plt.show()
'''
View an Image in More Detail
Here, we look at the normalized red, green, and blue (RGB) color channels as three separate, grayscale intensity images.
'''
rgb_img = np.squeeze(images[3])
channels = ['red channel', 'green channel', 'blue channel']
fig = plt.figure(figsize = (36, 36))
for idx in np.arange(rgb_img.shape[0]):
ax = fig.add_subplot(1, 3, idx + 1)
img = rgb_img[idx]
ax.imshow(img, cmap='gray')
ax.set_title(channels[idx])
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center', size=8,
color='white' if img[x][y]<thresh else 'black')
plt.show()
#### Define the Network Architecture
'''
This time, you'll define a CNN architecture. Instead of an MLP, which used linear, fully-connected layers, you'll use the following:
Convolutional layers, which can be thought of as stack of filtered images.
Maxpooling layers, which reduce the x-y size of an input, keeping only the most active pixels from the previous layer.
The usual Linear + Dropout layers to avoid overfitting and produce a 10-dim output.
A network with 2 convolutional layers is shown in the image below and in the code, and you've been given starter code with one convolutional and one maxpooling layer.
TODO: Define a model with multiple convolutional layers, and define the feedforward network behavior.
The more convolutional layers you include, the more complex patterns in color and shape a model can detect. It's suggested that your final model include 2 or 3 convolutional layers as well as linear layers + dropout in between to avoid overfitting.
It's good practice to look at existing research and implementations of related models as a starting point for defining your own models. You may find it useful to look at this PyTorch classification example or this more complex Keras example to help decide on a final structure.
Output volume for a convolutional layer
To compute the output size of a given convolutional layer we can perform the following calculation (taken from Stanford's cs231n course):
We can compute the spatial size of the output volume as a function of the input volume size (W), the kernel/filter size (F), the stride with which they are applied (S), and the amount of zero padding used (P) on the border. The output width is given by (W−F+2P)/S+1.
For example for a 7x7 input and a 3x3 filter with stride 1 and pad 0 we would get a 5x5 output. With stride 2 we would get a 3x3 output.
'''
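# --- Added illustration (not part of the original notebook): a minimal sketch of the
# output-size formula quoted above, assuming square inputs/filters and integer division.
def conv_output_size(W, F, S=1, P=0):
    """Spatial output size of a square conv layer: (W - F + 2*P) // S + 1."""
    return (W - F + 2 * P) // S + 1
# e.g. conv_output_size(7, 3) == 5 and conv_output_size(7, 3, S=2) == 3,
# matching the 7x7 input / 3x3 filter example above.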
### see here for the winning architecture:
# http://blog.kaggle.com/2015/01/02/cifar-10-competition-winners-interviews-with-dr-ben-graham-phil-culliton-zygmunt-zajac/
### see here for pytorch tutorial:
# https://github.com/pytorch/tutorials/blob/master/beginner_source/blitz/cifar10_tutorial.py
#################### NOTE this is version 1, the bigger model
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net1(nn.Module):
def __init__(self):
super(Net1, self).__init__()
# setup
num_classes = 10
drop_p = 0.5
# convolutional layer
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# fully connected layer
self.fc1 = nn.Linear(1024, 256, bias=True)
self.fc2 = nn.Linear(256, 64, bias=True)
self.fc3 = nn.Linear(64, num_classes, bias=True)
# dropout
self.dropout = nn.Dropout(drop_p)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
# flatten image, keep batch size
x = x.view(x.shape[0], -1)
x = self.dropout(self.fc1(x))
x = self.dropout(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
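# Added note (assumes the standard 32x32 CIFAR-10 inputs): each conv+pool block halves
# the spatial size, 32 -> 16 -> 8 -> 4, and conv3 outputs 64 channels, so the flattened
# size is 64 * 4 * 4 = 1024, which matches self.fc1 = nn.Linear(1024, 256, bias=True) above.
# Quick shape check (illustrative):
#   out = Net1()(torch.randn(2, 3, 32, 32))   # expected: torch.Size([2, 10])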
##################### NOTE this is version 2, the smaller model
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net2(nn.Module):
def __init__(self):
super(Net2, self).__init__()
# setup
num_classes = 10
drop_p = 0.25
# convolutional layer
self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
self.conv2 = nn.Conv2d(8, 16, 3, padding=1)
self.conv3 = nn.Conv2d(16, 32, 3, padding=1)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# fully connected layer
self.fc1 = nn.Linear(512, 128, bias=True)
self.fc2 = nn.Linear(128, 64, bias=True)
self.fc3 = nn.Linear(64, num_classes, bias=True)
# dropout
self.dropout = nn.Dropout(drop_p)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
# flatten image, keep batch size
x = x.view(x.shape[0], -1)
x = self.dropout(self.fc1(x))
x = self.dropout(self.fc2(x))
x = F.log_softmax(self.fc3(x), dim=1)
return x
##################### NOTE this is the official solution example; it is similar to version 1
# * conv layers are the same as mine
# * has one fc layer fewer than mine
# * has a dropout after the last conv, which I did not have
# * has a relu after the first fc, which I did not have
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# convolutional layer
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, 64, 3, padding=1)
# max pooling layer
self.pool = nn.MaxPool2d(2, 2)
# fully connected layer
self.fc1 = nn.Linear(64 * 4 * 4, 500)
self.fc2 = nn.Linear(500, 10)
# dropout
self.dropout = nn.Dropout(0.25)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
# flatten image, keep batch size
x = x.view(-1, 64 * 4 * 4)
x = self.dropout(x)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return x
#################### NOTE this is version 3, after seeing the official solution
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
class Net3(nn.Module):
def __init__(self):
super(Net3, self).__init__()
# setup
num_classes = 10
drop_p = 0.5
# convolutional layer
self.conv1 = nn.Conv2d(3, 16, 3, padding=1)
self.conv2 = nn.Conv2d(16, 32, 3, padding=1)
self.conv3 = nn.Conv2d(32, | |
<gh_stars>0
import os
import sys
import json
import copy
import mdtraj
import numpy as np
import time
import pandas as pd
import pickle
import mdtraj as md
import multiprocessing as mp
try:
import cupy as cp
cudaExists = True
import kernel
except ImportError as e:
cudaExists = False
print("Can't load CuPy, contact fingerprint will not run")
# sys.path.insert(1, os.path.join(sys.path[0], '../test_contacts/contacts/contacts/'))
import importlib
ligand_map = {'A': 0, 'C': 0, 'N': 1, 'NA': 1, 'O': 2, 'OA': 2, 'F': 3, 'P': 4, 'S': 5, 'SA': 5, 'CL': 6,
'BR': 7, 'I': 8, 'H': 9}
protein_map = {'A': 0, 'C': 0, 'N': 1, 'NA': 1, 'O': 2, 'OA': 2, 'S': 3, 'SA': 3, 'H': 4}
# import MDAnalysis as mda
# import MDAnalysis.analysis.rms
def ALIGN_A_RMSD_B(P, Q, A, B):
# P is the one to be aligned (N * 3)
# Q is the ref (N * 3)
# A is the list of index to be considered for alignment (protein) (N * 1)
# B is the list of index to calculate RMSD (ligand) (N * 1)
# Returns rmsd between subset P[B] and Q[B]
PU = P[A] # Get subset
QU = Q[A] # Get subset
PC = PU - PU.mean(axis=0) # Center points
QC = QU - QU.mean(axis=0) # Center points
# Kabsch method
C = np.dot(np.transpose(PC), QC)
V, S, W = np.linalg.svd(C,full_matrices=False)
d = (np.linalg.det(V) * np.linalg.det(W)) < 0.0
if d:
S[-1] = -S[-1]
V[:, -1] = -V[:, -1]
# Create Rotation matrix U
U = np.dot(V, W)
P = P - PU.mean(axis=0) # Move all points
Q = Q - QU.mean(axis=0) # Move all points
P = np.dot(P, U) # Rotate P
diff = P[B] - Q[B]
N = len(P[B])
return np.sqrt((diff * diff).sum() / N), P + QU.mean(axis=0)
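# Added usage sketch (illustrative only; assumes N x 3 coordinate arrays in Angstrom,
# with made-up index ranges for the protein and ligand atoms):
#   P = np.random.rand(100, 3)                 # structure to align
#   Q = P + 0.1                                # reference structure
#   rmsd, P_aligned = ALIGN_A_RMSD_B(P, Q, range(0, 90), range(90, 100))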
def pureRMSD(P, Q):
    # Assumes P and Q are already aligned
diff = P - Q
N = len(P)
return np.sqrt((diff * diff).sum() / N)
def ReadLog(fname):
with open(fname,'r') as f:
cont = f.readlines()
Step = []
Time = []
Temperature = []
Etot = []
EKtot = []
EPtot = []
Bond = []
Angle = []
Dihedral = []
Elec = []
vdW = []
Solvent = []
start_print = False
for line in cont:
if '(6.) RESULTS' in line:
start_print = True
if 'Averages over' in line:
start_print = False
if start_print == True:
if 'Step' in line:
ele = line.split()
Step.append(int(ele[1]))
Time.append(float(ele[3]))
if 'Temperature' in line:
Temperature.append(float(line.split(':')[1].strip()))
if 'Etot' in line:
ele = line.split()
Etot.append(float(ele[1]))
EKtot.append(float(ele[3]))
EPtot.append(float(ele[5]))
if 'Bond' in line:
ele = line.split()
Bond.append(float(ele[1]))
Angle.append(float(ele[3]))
Dihedral.append(float(ele[5]))
if 'Elec' in line:
ele = line.split()
Elec.append(float(ele[1]))
vdW.append(float(ele[3]))
Solvent.append(float(ele[5]))
# print(line.strip('\n'))
output = np.array([Step, Time, Temperature, Etot, EKtot, EPtot, Bond, Angle, Dihedral, Elec, vdW, Solvent]).T
output = pd.DataFrame(output, columns=['Step', 'Time', 'Temperature', 'Etot', 'EKtot', 'EPtot', 'Bond', 'Angle', 'Dihedral', 'Elec', 'vdW', 'Solvent'])
return output
def Distribute_Lig_pose_RMSD(arg):
obj, crystalComp = arg
obj.calculateRMSD(crystalComp)
return obj
def readMDCRD(fname, natom):
with open(fname, 'r') as f:
f.readline()
cont = f.read()
xyz = list(map(float,cont.split()))
return np.array(xyz).reshape(-1, natom, 3)
class mdgxTrajectory:
def __init__(self, trjFile, outFile, rstFile, ioutfm):
self.trjFile = trjFile
self.outFile = outFile
self.rstFile = rstFile
self.ioutfm = ioutfm
self.hasRMSD = False
self.hasTrjFile = False
self.hasOutFile = False
self.output = None
self.RMSD = []
self.contactScore = []
self.hasContactScore = False
self.ligandTrajectory = None # We store this information for contact calculation
self.ligandTrajectoryH = None # We store this information for contact calculation
self.hasLigandTrajectory = False
# self.trjLength = 0
# self.rmsd = []
def calculateRMSD(self, prmtop, crystalComp, lig_len, ligand_res):
# This function also logs the ligand trajectory even if RMSD calculation fails
try:
if (not self.hasRMSD):
Profile = np.random.random() < 0.00
if Profile:
t0 = time.time()
if self.ioutfm == 1: # Binary netcdf trajectory
comp = mdtraj.load_netcdf(self.trjFile, top=prmtop)
elif self.ioutfm == 0: # ASCII MDCRD trajectory, loading is slower
systemLen = crystalComp.n_atoms
comp = readMDCRD(self.trjFile, systemLen)
if Profile:
t1 = time.time()
self.trjLength = len(comp)
if Profile:
t2 = time.time()
R = []
LT = []
systemLen = crystalComp.n_atoms
referenceXYZ = crystalComp.xyz[0]
if self.ioutfm == 1:
for xyz in comp.xyz:
R_this, LT_this = ALIGN_A_RMSD_B(xyz*10, referenceXYZ*10,
range(0, systemLen-lig_len), range((systemLen-lig_len), systemLen))
R.append(R_this)
LT.append(LT_this)
elif self.ioutfm == 0:
for xyz in comp:
R_this, LT_this = ALIGN_A_RMSD_B(xyz, referenceXYZ*10,
range(0, systemLen-lig_len), range((systemLen-lig_len), systemLen))
R.append(R_this)
LT.append(LT_this)
self.RMSD = np.array(R)
LT = np.array(LT)
if Profile:
t3 = time.time()
self.output = ReadLog(self.outFile)
if Profile:
t4 = time.time()
self.hasRMSD = True
if Profile:
print(f' Profiling: Loading comp {(t1-t0)*1000:.3f} ms | Superpose {(t2-t1)*1000:.3f} ms | RMSD {(t3-t2)*1000:.3f} ms | Read output {(t4-t3)*1000:.3f} ms ')
self.hasTrjFile = True
self.hasOutFile = True
if not self.hasLigandTrajectory: # Also store ligand trajectories for contact analysis
lig = crystalComp.top.select(f"residue {ligand_res} and not symbol H")
ligH = crystalComp.top.select(f"residue {ligand_res}")
pro = crystalComp.top.select(f"not residue {ligand_res} and not symbol H")
self.ligandTrajectory = LT[:,lig]
self.ligandTrajectoryH = LT[:, ligH]
self.hasLigandTrajectory = True
except:
pass
def readOutput(self):
try:
self.output = ReadLog(self.outFile)
self.hasOutFile = True
except:
pass
    def readLigTraj(self, prmtop, initialPose, ligand_res):
        # This function just logs the ligand trajectory
        # initialPose is for getting the systemLen
        # NOTE: lightly repaired sketch; the original referenced an undefined LT and
        # self.initialPose. Unit handling is assumed to follow calculateRMSD (Angstrom).
        if not self.hasLigandTrajectory:
            initialComp = mdtraj.load(initialPose, top=prmtop)
            systemLen = initialComp.n_atoms
            if self.ioutfm == 1: # Binary netcdf trajectory
                comp = mdtraj.load_netcdf(self.trjFile, top=prmtop)
                LT = comp.xyz * 10 # nm -> Angstrom
            elif self.ioutfm == 0: # ASCII MDCRD trajectory, loading is slower
                LT = readMDCRD(self.trjFile, systemLen) # already in Angstrom
            lig = initialComp.top.select(f"residue {ligand_res} and not symbol H")
            ligH = initialComp.top.select(f"residue {ligand_res}")
            self.ligandTrajectory = LT[:, lig]
            self.ligandTrajectoryH = LT[:, ligH]
            self.hasLigandTrajectory = True
def getContactScore(self, prmtop, ligand_res):
t0 = time.time()
if self.ioutfm == 1:
comp = md.load(self.trjFile, top=prmtop)
elif self.ioutfm == 0:
comp = md.load_mdcrd(self.trjFile, top=prmtop)
# print(comp)
t1 = time.time()
pro = comp.top.select(f"not residue {ligand_res} and not symbol H")
lig = comp.top.select(f"residue {ligand_res} and not symbol H")
close_atoms = np.array([ 45, 106, 107, 167, 168, 170, 175, 176, 177, 178, 179, 180, 181, 182, 232, 259, 381, 386, 387, 388])
# self.proteinCoordinates = comp.xyz[0][pro][close_atoms]*10
# self.ligandTrajectory2 = comp.xyz[:,lig]*10
x_ligand = cp.array(comp.xyz[:,lig,0].flatten()*10)
y_ligand = cp.array(comp.xyz[:,lig,1].flatten()*10)
z_ligand = cp.array(comp.xyz[:,lig,2].flatten()*10)
x_protein = cp.array(comp.xyz[0][pro][close_atoms][:,0]*10)
y_protein = cp.array(comp.xyz[0][pro][close_atoms][:,1]*10)
z_protein = cp.array(comp.xyz[0][pro][close_atoms][:,2]*10)
t_protein = cp.array(cp.arange(len(close_atoms)), dtype=cp.int32)
# types_protein = cp.array([protein_map[x.element.symbol.upper()] for x in np.array(list(comp.top.atoms))[pro]], dtype=cp.int32)
types_ligand = cp.tile(cp.array([ligand_map[x.element.symbol.upper()] for x in np.array(list(comp.top.atoms))[lig]], dtype=cp.int32), comp.n_frames)
offset = cp.linspace(0,len(types_ligand),comp.n_frames+1,dtype=cp.int32)
nbins = 1
binsize = 3.4
t2 = time.time()
feat = kernel.compute(x_ligand, y_ligand, z_ligand, types_ligand, offset,
x_protein, y_protein, z_protein, t_protein,
cutoff=nbins*binsize,binsize=binsize,nbins=nbins,
n_receptor_types=len(t_protein), max_ligand_atoms=(len(lig)+31)//32*32)
print(feat.shape)
t3 = time.time()
self.features = cp.asnumpy(feat)
# print(feat2.shape)
t35 = time.time()
# self.contactScore = feat2.reshape(len(comp), -1, len(pro))
# self.contactScore = contactScore.get()
# self.hasContactScore = True
t4 = time.time()
print(f'Timing: Load {(t1-t0)*1000:.2f} ms, set {(t2-t1)*1000:.2f} ms, calc {(t3-t2)*1000:.2f} ms, convert {(t4-t3)*1000:.2f} ms')
print(f'Timing: feat get {(t35-t3)*1000:.2f} ms, reshape {(t4-t35)*1000:.2f} ms')
def inheritMdgxTrajectory(self, TRJ):
self.trjFile = TRJ.trjFile
self.outFile = TRJ.outFile
self.rstFile = TRJ.rstFile
self.hasRMSD = TRJ.hasRMSD
# if self.hasRMSD:
self.RMSD = TRJ.RMSD
self.ioutfm = TRJ.ioutfm
self.hasTrjFile = TRJ.hasTrjFile
self.hasOutFile = TRJ.hasOutFile
self.output = TRJ.output
try:
self.hasContactScore = TRJ.hasContactScore
self.contactScore = TRJ.contactScore
except:
self.hasContactScore = False
self.contactScore = None
# try:
self.ligandTrajectory = TRJ.ligandTrajectory
self.ligandTrajectoryH = TRJ.ligandTrajectoryH
self.hasLigandTrajectory = TRJ.hasLigandTrajectory
# self.proteinCheck = TRJ.proteinCheck
# except:
# self.ligandTrajectory = None
# self.hasLigandTrajectory = False
# self.RMSD = TRJ.RMSD
class Pose:
def __init__(self, name, rank, successQR=False, ligand_res=None, settings=None, folderMetadata=None, simulationPrefixes=['EM','QR','MD']):
self.poseName = name
self.ligandName = name.split('_')[0]
self.nrep = {}
self.length = {}
self.writeCrdInterval = {}
self.ioutfm = {}
self.timeStep = {}
self.successQR = successQR # If QR succeeds all subsequent rounds succeed.
self.ligand_res = ligand_res
self.simulationPrefixes = simulationPrefixes
for simPrefix in self.simulationPrefixes:
self.nrep[simPrefix] = int(settings[simPrefix]['N-rep'])
self.length[simPrefix] = int(settings[simPrefix]['cntrl']['nstlim'])
self.writeCrdInterval[simPrefix] = int(settings[simPrefix]['cntrl']['ntwx'])
self.ioutfm[simPrefix] = int(settings[simPrefix]['cntrl']['ioutfm'])
self.timeStep[simPrefix] = float(settings[simPrefix]['cntrl']['dt'])
if 'EX' in simPrefix:
self.nrep[simPrefix] = self.nrep['MD']
self.outSuffix = settings['EM']['files']['-osf']
self.rstSuffix = settings['EM']['files']['-rsf']
self.crdSuffix = settings['EM']['files']['-xsf']
self.rootFolder = folderMetadata['rootFolder']
self.referenceFolder = folderMetadata['referenceFolder']
self.structureFolder = folderMetadata['structureFolder']
self.simulationFolder = folderMetadata['simulationFolder']
self.inpcrdFolder = folderMetadata['inpcrdFolder']
self.prmtopFolder = folderMetadata['prmtopFolder']
self.traj = {}
for simPrefix in self.simulationPrefixes:
self.traj[simPrefix] = []
if simPrefix == 'EM' or simPrefix == 'QR':
pass
elif not self.successQR:
continue
for ii in range(self.nrep[simPrefix]):
trjFile = f'{self.simulationFolder}/{self.poseName}/{simPrefix}_R{ii+1}{self.crdSuffix}'
outFile = f'{self.simulationFolder}/{self.poseName}/{simPrefix}_R{ii+1}{self.outSuffix}'
rstFile = f'{self.simulationFolder}/{self.poseName}/{simPrefix}_R{ii+1}{self.rstSuffix}'
self.traj[simPrefix].append(mdgxTrajectory(trjFile, outFile, rstFile, self.ioutfm[simPrefix]))
# Also include initial pose
self.initialPose = f'{self.inpcrdFolder}/{name}.inpcrd'
| |
},
89: {
'col_and_row': u'D17',
'row': 4,
'col': 17,
'well_id': 89,
},
90: {
'col_and_row': u'D18',
'row': 4,
'col': 18,
'well_id': 90,
},
91: {
'col_and_row': u'D19',
'row': 4,
'col': 19,
'well_id': 91,
},
92: {
'col_and_row': u'D20',
'row': 4,
'col': 20,
'well_id': 92,
},
93: {
'col_and_row': u'D21',
'row': 4,
'col': 21,
'well_id': 93,
},
94: {
'col_and_row': u'D22',
'row': 4,
'col': 22,
'well_id': 94,
},
95: {
'col_and_row': u'D23',
'row': 4,
'col': 23,
'well_id': 95,
},
96: {
'col_and_row': u'D24',
'row': 4,
'col': 24,
'well_id': 96,
},
97: {
'col_and_row': u'E1',
'row': 5,
'col': 1,
'well_id': 97,
},
98: {
'col_and_row': u'E2',
'row': 5,
'col': 2,
'well_id': 98,
},
99: {
'col_and_row': u'E3',
'row': 5,
'col': 3,
'well_id': 99,
},
100: {
'col_and_row': u'E4',
'row': 5,
'col': 4,
'well_id': 100,
},
101: {
'col_and_row': u'E5',
'row': 5,
'col': 5,
'well_id': 101,
},
102: {
'col_and_row': u'E6',
'row': 5,
'col': 6,
'well_id': 102,
},
103: {
'col_and_row': u'E7',
'row': 5,
'col': 7,
'well_id': 103,
},
104: {
'col_and_row': u'E8',
'row': 5,
'col': 8,
'well_id': 104,
},
105: {
'col_and_row': u'E9',
'row': 5,
'col': 9,
'well_id': 105,
},
106: {
'col_and_row': u'E10',
'row': 5,
'col': 10,
'well_id': 106,
},
107: {
'col_and_row': u'E11',
'row': 5,
'col': 11,
'well_id': 107,
},
108: {
'col_and_row': u'E12',
'row': 5,
'col': 12,
'well_id': 108,
},
109: {
'col_and_row': u'E13',
'row': 5,
'col': 13,
'well_id': 109,
},
110: {
'col_and_row': u'E14',
'row': 5,
'col': 14,
'well_id': 110,
},
111: {
'col_and_row': u'E15',
'row': 5,
'col': 15,
'well_id': 111,
},
112: {
'col_and_row': u'E16',
'row': 5,
'col': 16,
'well_id': 112,
},
113: {
'col_and_row': u'E17',
'row': 5,
'col': 17,
'well_id': 113,
},
114: {
'col_and_row': u'E18',
'row': 5,
'col': 18,
'well_id': 114,
},
115: {
'col_and_row': u'E19',
'row': 5,
'col': 19,
'well_id': 115,
},
116: {
'col_and_row': u'E20',
'row': 5,
'col': 20,
'well_id': 116,
},
117: {
'col_and_row': u'E21',
'row': 5,
'col': 21,
'well_id': 117,
},
118: {
'col_and_row': u'E22',
'row': 5,
'col': 22,
'well_id': 118,
},
119: {
'col_and_row': u'E23',
'row': 5,
'col': 23,
'well_id': 119,
},
120: {
'col_and_row': u'E24',
'row': 5,
'col': 24,
'well_id': 120,
},
121: {
'col_and_row': u'F1',
'row': 6,
'col': 1,
'well_id': 121,
},
122: {
'col_and_row': u'F2',
'row': 6,
'col': 2,
'well_id': 122,
},
123: {
'col_and_row': u'F3',
'row': 6,
'col': 3,
'well_id': 123,
},
124: {
'col_and_row': u'F4',
'row': 6,
'col': 4,
'well_id': 124,
},
125: {
'col_and_row': u'F5',
'row': 6,
'col': 5,
'well_id': 125,
},
126: {
'col_and_row': u'F6',
'row': 6,
'col': 6,
'well_id': 126,
},
127: {
'col_and_row': u'F7',
'row': 6,
'col': 7,
'well_id': 127,
},
128: {
'col_and_row': u'F8',
'row': 6,
'col': 8,
'well_id': 128,
},
129: {
'col_and_row': u'F9',
'row': 6,
'col': 9,
'well_id': 129,
},
130: {
'col_and_row': u'F10',
'row': 6,
'col': 10,
'well_id': 130,
},
131: {
'col_and_row': u'F11',
'row': 6,
'col': 11,
'well_id': 131,
},
132: {
'col_and_row': u'F12',
'row': 6,
'col': 12,
'well_id': 132,
},
133: {
'col_and_row': u'F13',
'row': 6,
'col': 13,
'well_id': 133,
},
134: {
'col_and_row': u'F14',
'row': 6,
'col': 14,
'well_id': 134,
},
135: {
'col_and_row': u'F15',
'row': 6,
'col': 15,
'well_id': 135,
},
136: {
'col_and_row': u'F16',
'row': 6,
'col': 16,
'well_id': 136,
},
137: {
'col_and_row': u'F17',
'row': 6,
'col': 17,
'well_id': 137,
},
138: {
'col_and_row': u'F18',
'row': 6,
'col': 18,
'well_id': 138,
},
139: {
'col_and_row': u'F19',
'row': 6,
'col': 19,
'well_id': 139,
},
140: {
'col_and_row': u'F20',
'row': 6,
'col': 20,
'well_id': 140,
},
141: {
'col_and_row': u'F21',
'row': 6,
'col': 21,
'well_id': 141,
},
142: {
'col_and_row': u'F22',
'row': 6,
'col': 22,
'well_id': 142,
},
143: {
'col_and_row': u'F23',
'row': 6,
'col': 23,
'well_id': 143,
},
144: {
'col_and_row': u'F24',
'row': 6,
'col': 24,
'well_id': 144,
},
145: {
'col_and_row': u'G1',
'row': 7,
'col': 1,
'well_id': 145,
},
146: {
'col_and_row': u'G2',
'row': 7,
'col': 2,
'well_id': 146,
},
147: {
'col_and_row': u'G3',
'row': 7,
'col': 3,
'well_id': 147,
},
148: {
'col_and_row': u'G4',
'row': 7,
'col': 4,
'well_id': 148,
},
149: {
'col_and_row': u'G5',
'row': 7,
'col': 5,
'well_id': 149,
},
150: {
'col_and_row': u'G6',
'row': 7,
'col': 6,
'well_id': 150,
},
151: {
'col_and_row': u'G7',
'row': 7,
'col': 7,
'well_id': 151,
},
152: {
'col_and_row': u'G8',
'row': 7,
'col': 8,
'well_id': 152,
},
153: {
'col_and_row': u'G9',
'row': 7,
'col': 9,
'well_id': 153,
},
154: {
'col_and_row': u'G10',
'row': 7,
'col': 10,
'well_id': 154,
},
155: {
'col_and_row': u'G11',
'row': 7,
'col': 11,
'well_id': 155,
},
156: {
'col_and_row': u'G12',
'row': 7,
'col': 12,
'well_id': 156,
},
157: {
'col_and_row': u'G13',
'row': 7,
'col': 13,
'well_id': 157,
},
158: {
'col_and_row': u'G14',
'row': 7,
'col': 14,
'well_id': 158,
},
159: {
'col_and_row': u'G15',
'row': 7,
'col': 15,
'well_id': 159,
},
160: {
'col_and_row': u'G16',
'row': 7,
'col': 16,
'well_id': 160,
},
161: {
'col_and_row': u'G17',
'row': 7,
'col': 17,
'well_id': 161,
},
162: {
'col_and_row': u'G18',
'row': 7,
'col': 18,
'well_id': 162,
},
163: {
'col_and_row': u'G19',
'row': 7,
'col': 19,
'well_id': 163,
},
164: {
'col_and_row': u'G20',
'row': 7,
'col': 20,
'well_id': 164,
},
165: {
'col_and_row': u'G21',
'row': 7,
'col': 21,
'well_id': 165,
},
166: {
'col_and_row': u'G22',
'row': 7,
'col': 22,
'well_id': 166,
},
167: {
'col_and_row': u'G23',
'row': 7,
'col': 23,
'well_id': 167,
},
168: {
'col_and_row': u'G24',
'row': 7,
'col': 24,
'well_id': 168,
},
169: {
'col_and_row': u'H1',
'row': 8,
'col': 1,
'well_id': 169,
},
170: {
'col_and_row': u'H2',
'row': 8,
'col': 2,
'well_id': 170,
},
171: {
'col_and_row': u'H3',
'row': 8,
'col': 3,
'well_id': 171,
},
172: {
'col_and_row': u'H4',
'row': 8,
'col': 4,
'well_id': 172,
},
173: {
'col_and_row': u'H5',
'row': 8,
'col': 5,
'well_id': 173,
},
174: {
'col_and_row': u'H6',
'row': 8,
'col': 6,
'well_id': 174,
},
175: {
'col_and_row': u'H7',
'row': 8,
'col': 7,
'well_id': 175,
},
176: {
'col_and_row': u'H8',
'row': 8,
'col': 8,
'well_id': 176,
},
177: {
'col_and_row': u'H9',
'row': 8,
'col': 9,
'well_id': 177,
},
178: {
'col_and_row': u'H10',
'row': 8,
'col': 10,
'well_id': 178,
},
179: {
'col_and_row': u'H11',
'row': 8,
'col': 11,
'well_id': 179,
},
180: {
'col_and_row': u'H12',
'row': 8,
'col': 12,
'well_id': 180,
},
181: {
'col_and_row': u'H13',
'row': 8,
'col': 13,
'well_id': 181,
},
182: {
'col_and_row': u'H14',
'row': 8,
'col': 14,
'well_id': 182,
},
183: {
'col_and_row': u'H15',
'row': 8,
'col': 15,
'well_id': 183,
},
184: {
'col_and_row': u'H16',
'row': 8,
'col': 16,
'well_id': 184,
},
185: {
'col_and_row': u'H17',
'row': 8,
'col': 17,
'well_id': 185,
},
186: {
'col_and_row': u'H18',
'row': 8,
'col': 18,
'well_id': 186,
},
187: {
'col_and_row': u'H19',
'row': 8,
'col': 19,
'well_id': 187,
},
188: {
'col_and_row': u'H20',
'row': 8,
'col': 20,
'well_id': 188,
},
189: {
'col_and_row': u'H21',
'row': 8,
'col': 21,
'well_id': 189,
},
190: {
'col_and_row': u'H22',
'row': 8,
'col': 22,
'well_id': 190,
},
191: {
'col_and_row': u'H23',
'row': 8,
'col': 23,
'well_id': 191,
},
192: {
'col_and_row': u'H24',
'row': 8,
'col': 24,
'well_id': 192,
},
193: {
'col_and_row': u'I1',
'row': 9,
'col': 1,
'well_id': 193,
},
194: {
'col_and_row': u'I2',
'row': 9,
'col': 2,
'well_id': 194,
},
195: {
'col_and_row': u'I3',
'row': 9,
'col': 3,
'well_id': 195,
},
196: {
'col_and_row': u'I4',
'row': 9,
'col': 4,
'well_id': 196,
},
197: {
'col_and_row': u'I5',
'row': 9,
'col': 5,
'well_id': 197,
},
198: {
'col_and_row': u'I6',
'row': 9,
'col': 6,
'well_id': 198,
},
199: {
'col_and_row': u'I7',
'row': 9,
'col': 7,
'well_id': 199,
},
200: {
'col_and_row': u'I8',
'row': 9,
'col': 8,
'well_id': 200,
},
201: {
'col_and_row': u'I9',
'row': 9,
'col': 9,
'well_id': 201,
},
202: {
'col_and_row': u'I10',
'row': 9,
'col': 10,
'well_id': 202,
},
203: {
'col_and_row': u'I11',
'row': 9,
'col': 11,
'well_id': 203,
},
204: {
'col_and_row': u'I12',
'row': 9,
'col': 12,
'well_id': 204,
},
205: {
'col_and_row': | |
yet made a choice.\nThe current line is: " + currentLine)
print("Make sure this Dependent section depends upon a Random section, and make sure that Random section must always be visited before this Dependent section.\n")
return -26
if chosenSubelement >= myNumChoices:
print("\nError! This Dependent section does not have enough subsections. The section it depends on chose element #" + chosenSubelement + " but this Dependent section only has #" + myNumChoices + " subsections.\nThe current line is: " + currentLine)
print("\nMake sure this Dependent section depends upon the correct Random section, make sure that the Random section and the Dependent section have the same number of subsections.")
return -28
print(currentLine, file=saveChoicesFile, end='')
print(chosenSubelement, file=saveChoicesFile)
print('\t'+str(chosenSubelement), file=txtChoicesFile, end='')
global globalCsvNames, globalCsvData
globalCsvNames += ",v" + myVariableName.replace("-", "_")
globalCsvData += "," + str(chosenSubelement+1)
for i in range(chosenSubelement):
retval = skipElement(inFile, currentLine)
if (retval < 0): return retval
retval = recursiveGenerate(inFile, outFile, saveChoicesFile, txtChoicesFile, myVariableName + "-" + str(chosenSubelement+1), dictionaryRepeatSame, dictionaryRepeatNever, dictionaryMatchSame, dictionaryMatchDifferent, dictionaryMatchOnlyOneEver, dictionaryMaxSelectionsPerSubPoint, startString, endString, currentString, currentPlusIntervalString, dictionaryLastChoice)
if (retval < 0): return retval
next_line = ''
while (not "*end_dependent* "+myLabel+" " in next_line):
next_line = inFile.readline()
if not next_line: #readline returns an empty string when it reaches EOF
print("\nError! Could not find *end_dependent* for the Dependent section with the label: " + myLabel)
print("The program finished following a subsection for this Dependent section but was unable to find this Dependent section's end tag. Make sure the end tag is in the file. Make sure the Random and Constant and Dependent sections have the correct number of subsections.")
return -27
next_line = next_line.rstrip('\n')+' '
return 1
def intersection(list1, list2):
"""
Returns the intersection and then the items in list2 that are not in list 1.
"""
int_dict = {}
not_int_dict = {}
list1_dict = {}
for e in list1: list1_dict[e] = 1
for e in list2:
if e in list1_dict: int_dict[e] = 1
else: not_int_dict[e] = 1
return [list(int_dict.keys()), list(not_int_dict.keys())]
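# Example (added for clarity):
#   intersection([1, 2, 3], [2, 3, 4]) -> [[2, 3], [4]]
# i.e. first the items common to both lists, then the items of list2 absent from list1.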
def nonUniformShuffle(freeToChoose, nonUniformFirstSubPoint, nonUniformFirstSubPointPercentage):
"""
Shuffle the list, but obey any nonUniformFirstSubPoint percentage.
"""
if logging: print('freeToChoose: ' + str(freeToChoose))
shuffle(freeToChoose)
if (nonUniformFirstSubPoint and (0 in freeToChoose)):
freeToChoose.remove(0)
if (random()*100. < nonUniformFirstSubPointPercentage):
freeToChoose.insert(0, 0)
else:
freeToChoose.append(0)
def getChoiceForRepeatSame(myLabel, dictionaryRepeatSame, freeToChoose, myVariableName):
"""
Get a Random section's choice if RepeatSame.
"""
if myLabel in dictionaryRepeatSame:
if dictionaryRepeatSame[myLabel] in freeToChoose: chosenSubelement = dictionaryRepeatSame[myLabel]
else:
print("\nError! Cannot satisfy both Repeat Same (aka 'Same when repeat') and either Match Different or Match Only One Ever.")
print("The label for this Random section: " + str(myLabel) + ". The 'key' which contains the label and also a concatenated list of the iterations for any ongoing repetitions: " + str(myVariableName))
print("\nAny given text file was supposed to choose the same choice each time it encountered this Random section (so this random section, or one of its parents must Repeat). All of the matched text files were supposed to choose different choices on the same iteration of the repetition. The program was not able to satisfy both constraints. The most likely cause is that not all of the matched files encountered this Random section on the same iteration of a parent Random section.")
print("\nFor example, if the first text file chose the first choice on the first iteration, then the second file did not encounter this Random section (due to a different choice in a Random parent), then the first file did not encounter this Random section in the second iteration, and finally the second file chose the first choice on the second iteration (a valid choice since it has not yet chosen anything and the other file did not choose on this repetition), then on any future repetition if they both encounter this Random section they will not be able to satisfy both constraints.")
print("\nThis error may not always occur because the files may choose differently by chance, or because they choose the same but never encounter this Random section on the same iteration.")
print("To alleviate this problem: remove one of the constraints (Repeat Same, Match Different, or Match Only One Ever), add more choices, reduce the number of matched files, or make the parent Random section Match Same so that all matched files encounter this Random section on the same iterations.")
return -14
else: chosenSubelement = freeToChoose[0]
return chosenSubelement
def getChoiceForDifferentDouble(repeatDifferentDoublePercentage, dictionaryLastChoice, myLabel, freeToChoose):
"""
Get a Random section's choice if RepeatDifferentDouble.
"""
if random()*100. < repeatDifferentDoublePercentage: chosenSubelement = dictionaryLastChoice[myLabel]
else:
freeToChoose.remove(dictionaryLastChoice[myLabel])
freeToChoose += [dictionaryLastChoice[myLabel]] # needed in case there is only one choice
chosenSubelement = freeToChoose[0]
return chosenSubelement
def getChoiceForMatchSame(repeatSame, myLabel, myVariableName, dictionaryMatchSame, dictionaryRepeatSame, repeatNever, dictionaryRepeatNever):
"""
Get a Random section's choice if MatchSame.
"""
if repeatSame and myLabel in dictionaryRepeatSame and dictionaryMatchSame[myVariableName] != dictionaryRepeatSame[myLabel]:
print("\nError! Cannot satisfy both Match Same and Repeat Same (aka 'Same when repeat').")
print("The label for this Random section: " + str(myLabel) + ". The 'key' which contains the label and also a concatenated list of the iterations for any ongoing repetitions: " + str(myVariableName))
print("\nThis Random section or one of its parents repeats. This section is supposed to always choose the same result as it has previously chosen, and is supposed to choose the same result as the matched text files chose on the same iteration of the repetition. The program was not able to satisfy both requirements. Most likely this random section is within another random section, and that parent random section does not use Match Same. So this section does not run on the same iterations for all the matched files. In the first iteration it did run for this file, no previous file had chosen this section, and this file chose differently than the others. Then in a future iteration, this file and a previous one both ran, putting the two requirements in conflict.")
print("\nTo solve this problem, make the parent repeating section Match Same, or remove one of the two restrictions. Alternatively, if the current template file is run again there is a chance that the text files will choose similarly and this error will not appear.")
return -16
if repeatNever and myLabel in dictionaryRepeatNever and dictionaryMatchSame[myVariableName] in dictionaryRepeatNever[myLabel]:
print("\nError! Cannot satisfy both Match Same and Repeat Never (aka 'Always different when repeat').")
print("The label for this Random section: " + str(myLabel) + ". The 'key' which contains the label and also a concatenated list of the iterations for any ongoing repetitions: " + str(myVariableName))
print("\nThis Random section or one of its parents repeats. This section is supposed to always choose the same result as the matched text files chose on the same iteration of the repetition, and this text file is not supposed to contain duplicates. The program was not able to satisfy both requirements. Most likely this random section is within another random section, and that parent random section does not use Match Same. So this section does not run on the same iterations for all the matched files. This text file made a choice in an iteration that no previous file chose during. Then in a later iteration, the previous files made that same choice and now this file cannot satisfy both requirements.")
print("\nTo solve this problem, make the parent repeating section Match Same, or remove one of the two restrictions. Alternatively, if the current template file is run again there may be a chance that the text files will choose similarly and this error will not appear.")
return -17
chosenSubelement = dictionaryMatchSame[myVariableName]
if logging: print('This section is MatchSame, and a previous resume in the batch has already chosen: ', chosenSubelement)
return chosenSubelement
def getChosenSubElement(repeatSame, repeatNever, repeatNoDoubles, repeatDifferentDouble, repeatDifferentDoublePercentage, nonUniformFirstSubPoint, nonUniformFirstSubPointPercentage, matchMaxSelectionsPerSubPoint, maxSelectionsPerSubPointInteger, matchSame, matchDifferent, matchOnlyOneEver, myVariableName, myNumChoices, myLabel, dictionaryRepeatSame, dictionaryRepeatNever, dictionaryMatchSame, dictionaryMatchDifferent, dictionaryMatchOnlyOneEver, dictionaryMaxSelectionsPerSubPoint, dictionaryLastChoice, minimumNumberOfEntries, maximumNumberOfEntries):
"""
Get a Random section's chosen subelement based on RepeatNever, MatchOnlyOneEver, MatchSame, etc.
"""
freeToChoose = list(range(myNumChoices))
if | |
<reponame>illbebach/Dying-Bullish-Euphoria<gh_stars>1-10
# Program for testing "no new highs lately" or "dying bullish euphoria" (DBE)
# strategy.
# Author: <NAME>
# Date: June 2019
# New to version 2.0
# * Added a ticker price threshold for reentry. When the ticker (index) falls
# below a user entered percent of the most recent new high then the program
# reenters the market regardless of the signal (bull or bear).
# New to version 1.2
# * Added plotting
# * Counts days since last M-day high
# New to version 1.1
# * Can upload data from a spreadsheet. For some reason downloading from
# this program only retrieves data since about 1970 for the S&P 500.
# However, you can manually download data from Y! that goes back to 1950.
# Similar issues for other indices. Hence uploading from a spreadsheet
# may be prefered.
# * Fixes copy of dataframe. v 1.0 did this incorrectly, so that the original
# dataframe was modified in the loop. I don't think it caused any issues,
# but it was not consistent with my intention.
# Import Modules
import pandas as pd
import datetime
import numpy as np
#import math
#import json
#%% Upload historical prices from an Excel spreadsheet
# If you want to download data from Yahoo! don't run this cell.
#
# For correct formating download data from Yahoo! into csv format and then
# simply "save as" a .xlsx spreadsheet.
# If you run this cell you probably do not want to run the next cell which
# downloads prices from Y!
# File name and sheet name
# Most recent data should be at the bottom.
eodDataFile = "snp500data_2019-6-18.xlsx"
sheet = "Data"
# Reads in col headings as str.
origDataDF = pd.read_excel(eodDataFile, sheet, index_col = 0)
# Do this if you want a smaller dataset for testing, checking post-discovery
# results, etc.
#origDataDF = origDataDF.tail(1000)
#%% Download Historical stock prices from Y!.
# Should include date, open, high, low, close, adjusted close, and volume.
# If you imported data from a spreadsheet above don't run this cell.
# Import module to download stock prices from Yahoo!
import pandas_datareader as web
# NOTE: this cell need not be run every time a parameter is changed, as the
# data in this dataframe is not changed elsewhere in the program. Only run
# this cell when parameters for this cell are changed! Otherwise you are
# querying Yahoo for data unnecessarily.
# Cell Parameters
tkr = '^IXIC' # Stock ticker that data will be downloaded for
# First trading day for SPY etf is 1993 Jan 29.
# By trial and error it seems the oldest date DataReader will allow is
# 1970 Jan 1, even tho S&P 500 data on Yahoo! goes back to approx 1950 Jan 3
# For NASDAQ (^IXIC) 1971, Feb 5
startDate = datetime.date(1971, 2, 1) # start date (yr, mo, day)
endDate = datetime.date(2019, 8, 2) # end date
# Download data. Most recent data is on bottom.
origDataDF = web.DataReader(tkr, 'yahoo', startDate, endDate)
#%% Parameters
reentryPct = 0.01 # If the price drops below (reentryPct * most recent New
# high) then reenter position regardless of signal.
# To disable set to 0
M = 107 # Looking for a new M-day hi
N = 134 # in last N days
K = 250 # Starting point for calculations. Should be at least
# as large as M+N
series = 'Adj Close' # Can use 'Close', 'High', 'Low', 'Open', 'Adj Close'
# For indices 'Close' = 'Adj Close' (I think)
###################################################
# Have we had a new M-day high in the last N days?
# First copy original downloaded data into new dataframe so we get clean data
# each time we rerun this cell.
eodDF = origDataDF.copy(deep = True)
# Find new M-day highs (True) and calculate reentry price
eodDF['MdayHi'] = eodDF[series].rolling(M).max()
eodDF['newHi'] = np.where(eodDF[series] == eodDF['MdayHi'], True, False)
eodDF['rePt'] = reentryPct*eodDF['MdayHi']
# Count days since last new M-day high.
# I found this soln on Stack Overflow.
# First run comparison to find where new contiguous groups begin (True)
eodDF['dSinceNewHi'] = (eodDF['newHi'] != eodDF['newHi'].shift(1))
# Now use cumsum() (cummulative sum) to count the number of "groups"
eodDF['dSinceNewHi'] = eodDF['dSinceNewHi'].cumsum()
# Now groupby() with cumcount() to form running count of each group. This
# counts the first occurrence as 0, which is correct when we transition to a new
# high (Trues), but is 1 too small when we transition to "not a new high"
# (false). We are counting days since a new high (Falses) so add 1.
eodDF['dSinceNewHi'] = eodDF.groupby('dSinceNewHi').cumcount() + 1
# Finally, all occurrences of 'True' in 'newHi' col yield a corresponding 0 in
# 'dSinceNewHi' col.
eodDF.loc[eodDF['newHi'] == True, 'dSinceNewHi'] = 0
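# Worked example of the trick above (added for clarity):
#   newHi:                          [True, False, False, False, True, False]
#   newHi != newHi.shift(1):        [True, True,  False, False, True, True ]
#   .cumsum() (group ids):          [1,    2,     2,     2,     3,    4    ]
#   groupby(...).cumcount() + 1:    [1,    1,     2,     3,     1,    1    ]
#   then rows where newHi is True are set to 0, giving dSinceNewHi = [0, 1, 2, 3, 0, 1]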
# Have we had a new M-day high in the last N days?
eodDF.loc[eodDF['dSinceNewHi'] < N, 'signal'] = 'bull'
eodDF.loc[eodDF['signal'] != 'bull', 'signal'] = 'bear'
# Erase any signals prior to start of tracking
eodDF.loc[eodDF.index.values < eodDF.index.values[K], 'signal'] = np.nan
# IMPORTANT: we are assuming the signal is an end-of-day signal. So when
# the signal changes from 'bear' to 'bull' we would purchase the tkr at market
# close. We would therefor be in the market the following day. So there is a
# one-day lag between the signal and returns. The shift moves all signals
# foward one day. We then change the wording: bull=True, bear=False.
eodDF['inMkt'] = eodDF['signal'].shift(1)
eodDF['inMkt'] = eodDF['inMkt'].where(eodDF['inMkt'] == 'bull', False)
eodDF['inMkt'] = eodDF['inMkt'].where(eodDF['inMkt'] == False, True)
# Set values to False prior to when we start tracking. First possible valid
# signal day occurs at index M+N, but we will not be in the market that day.
eodDF.loc[eodDF.index.values <= eodDF.index.values[K], 'inMkt'] = False
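# Illustrative example of the one-day lag (added): if signal = ['bear', 'bull', 'bull'],
# then signal.shift(1) = [NaN, 'bear', 'bull'], so a buy signal generated at Tuesday's
# close means we are actually in the market starting Wednesday.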
############################################################################
# Now we calculate reentry points due to the price crossing below a user-set
# threshold. This will trigger if the signal is bear but the price has dropped
# below a user set percent of the most recent new high. We will then get back
# into the market and stay there until a new high is reached again.
# Create new column in dataframe, populate with NaN
eodDF['reentrySignal'] = np.nan
# Retrieve indexes where price is below reentry point and signal is 'bear'.
# If the signal rises above the reentry point while the signal is still 'bear'
# that will not be captured here, so we will forward fill below.
idxList = eodDF.loc[
(eodDF.Low < eodDF.rePt) & (eodDF.signal == 'bear')].index
# We want to be in the market on these days
eodDF.loc[idxList, 'reentrySignal'] = True
# Marker for a new high; turn off reentrySignal
eodDF.loc[eodDF.dSinceNewHi == 0, 'reentrySignal'] = False
# Now forward fill. True will forward fill until we hit the False marker.
# False will forward fill until it hits a True.
eodDF['reentrySignal'] = eodDF['reentrySignal'].fillna(method = 'ffill')
# All is good except that we need to extend the sequence of Trues by 1 so that
# we can transfer them to the inMkt column. Otherwise we'll be in the market
# until we hit a new high (good) and then we will be out one day (bad) before
# jumping back in.
# This gets the True where we need it.
eodDF['reentrySignal'] = ( eodDF['reentrySignal'] +
eodDF['reentrySignal'].shift(1) )
# But now we have a bunch of 2s that should be 1s. Fix that.
eodDF.loc[ eodDF['reentrySignal'] == 2, 'reentrySignal' ] = 1
# Not necessary, but change back to Trues and Falses
eodDF.loc[ eodDF['reentrySignal'] == 0, 'reentrySignal' ] = False
eodDF.loc[ eodDF['reentrySignal'] == 1, 'reentrySignal' ] = True
# Now copy the Trues over the inMkt column
eodDF.loc[ eodDF['reentrySignal'] == True, 'inMkt' ] = True
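# Worked example of the fill-and-extend trick above (added for clarity; hypothetical
# five-day window that starts just after an earlier new-high False marker):
#   reentrySignal after ffill:        [False, True, True, False, False]
#   (price below rePt while bearish on days 2-3, new M-day high reached on day 4)
#   x + x.shift(1), then 2 -> 1:      [NaN,   True, True, True,  False]
# so the position is held through day 4, the day the new high is hit, instead of
# being dropped one day before re-entering on the bull signal.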
#%%###############################
# Calculate returns and statistics
# Calculate daily tkr returns. shift(1) is previous day's data
eodDF['tkrRtnDay'] = eodDF['Adj Close']/eodDF['Adj Close'].shift(1)
# Calculate running return. Note that first valid sell signal occurs at least
# M+N days after first day of data. Must establish an M-day hi followed by N days
# w/o a new M-day hi. So this col only makes sense for index locations
# past M+N
eodDF['tkrCumRtn'] = eodDF['Adj Close']/eodDF['Adj Close'][K]
# Calculate running CAGR.
# Intermediate calculatioin: years since starting date at M+N index
days_per_yr = 365.2422
eodDF['yrs'] = (eodDF.index.values - eodDF.index.values[K]).astype(
'timedelta64[D]') / (days_per_yr * np.timedelta64(1, 'D'))
eodDF['tkrCAGR'] = eodDF['tkrCumRtn']**(1 / eodDF['yrs'])
# Calculate daily return for algorithm. Same as return for ticker, except | |
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""nova HACKING file compliance testing
built on top of pep8.py
"""
import inspect
import logging
import os
import re
import sys
import tokenize
import traceback
import warnings
import pep8
# Don't need this for testing
logging.disable('LOG')
#N1xx comments
#N2xx except
#N3xx imports
#N4xx docstrings
#N5xx dictionaries/lists
#N6xx Calling methods
#N7xx localization
IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session']
DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = False
def is_import_exception(mod):
return mod in IMPORT_EXCEPTIONS or \
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS)
def import_normalize(line):
# convert "from x import y" to "import x.y"
# handle "from x import y as z" to "import x.y as z"
split_line = line.split()
if (line.startswith("from ") and "," not in line and
split_line[2] == "import" and split_line[3] != "*" and
split_line[1] != "__future__" and
(len(split_line) == 4 or
(len(split_line) == 6 and split_line[4] == "as"))):
mod = split_line[3]
return "import %s.%s" % (split_line[1], split_line[3])
else:
return line
def nova_todo_format(physical_line):
"""Check for 'TODO()'.
nova HACKING guide recommendation for TODO:
Include your name with TODOs as in "#TODO(termie)"
N101
"""
pos = physical_line.find('TODO')
pos1 = physical_line.find('TODO(')
pos2 = physical_line.find('#') # make sure its a comment
if (pos != pos1 and pos2 >= 0 and pos2 < pos):
return pos, "NOVA N101: Use TODO(NAME)"
def nova_except_format(logical_line):
"""Check for 'except:'.
nova HACKING guide recommends not using except:
Do not write "except:", use "except Exception:" at the very least
N201
"""
if logical_line.startswith("except:"):
return 6, "NOVA N201: no 'except:' at least use 'except Exception:'"
def nova_except_format_assert(logical_line):
"""Check for 'assertRaises(Exception'.
nova HACKING guide recommends not using assertRaises(Exception...):
Do not use overly broad Exception type
N202
"""
if logical_line.startswith("self.assertRaises(Exception"):
return 1, "NOVA N202: assertRaises Exception too broad"
def nova_one_import_per_line(logical_line):
"""Check for import format.
nova HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
BAD: from nova.rpc.common import RemoteError, LOG
N301
"""
pos = logical_line.find(',')
parts = logical_line.split()
if pos > -1 and (parts[0] == "import" or
parts[0] == "from" and parts[2] == "import") and \
not is_import_exception(parts[1]):
return pos, "NOVA N301: one import per line"
_missingImport = set([])
def nova_import_module_only(logical_line):
"""Check for import module only.
nova HACKING guide recommends importing only modules:
Do not import objects, only modules
N302 import only modules
N303 Invalid Import
N304 Relative Import
"""
def importModuleCheck(mod, parent=None, added=False):
"""
If can't find module on first try, recursively check for relative
imports
"""
current_path = os.path.dirname(pep8.current_file)
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
valid = True
if parent:
if is_import_exception(parent):
return
parent_mod = __import__(parent, globals(), locals(),
[mod], -1)
valid = inspect.ismodule(getattr(parent_mod, mod))
else:
__import__(mod, globals(), locals(), [], -1)
valid = inspect.ismodule(sys.modules[mod])
if not valid:
if added:
sys.path.pop()
added = False
return logical_line.find(mod), ("NOVA N304: No "
"relative imports. '%s' is a relative import"
% logical_line)
return logical_line.find(mod), ("NOVA N302: import only "
"modules. '%s' does not import a module"
% logical_line)
except (ImportError, NameError) as exc:
if not added:
added = True
sys.path.append(current_path)
return importModuleCheck(mod, parent, added)
else:
name = logical_line.split()[1]
if name not in _missingImport:
if VERBOSE_MISSING_IMPORT:
print >> sys.stderr, ("ERROR: import '%s' failed: %s" %
(name, exc))
_missingImport.add(name)
added = False
sys.path.pop()
return
except AttributeError:
# Invalid import
return logical_line.find(mod), ("NOVA N303: Invalid import, "
"AttributeError raised")
# convert "from x import y" to " import x.y"
# convert "from x import y as z" to " import x.y"
import_normalize(logical_line)
split_line = logical_line.split()
if (logical_line.startswith("import ") and "," not in logical_line and
(len(split_line) == 2 or
(len(split_line) == 4 and split_line[2] == "as"))):
mod = split_line[1]
return importModuleCheck(mod)
# TODO(jogo) handle "from x import *"
#TODO(jogo): import template: N305
def nova_import_alphabetical(physical_line, line_number, lines):
"""Check for imports in alphabetical order.
nova HACKING guide recommendation for imports:
imports in human alphabetical order
N306
"""
# handle import x
# use .lower since capitalization shouldn't dictate order
split_line = import_normalize(physical_line.strip()).lower().split()
split_previous = import_normalize(lines[line_number - 2]
).strip().lower().split()
# with or without "as y"
length = [2, 4]
if (len(split_line) in length and len(split_previous) in length and
split_line[0] == "import" and split_previous[0] == "import"):
if split_line[1] < split_previous[1]:
return (0, "NOVA N306: imports not in alphabetical order (%s, %s)"
% (split_previous[1], split_line[1]))
def nova_docstring_start_space(physical_line):
"""Check for docstring not start with space.
nova HACKING guide recommendation for docstring:
Docstring should not start with space
N401
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) > pos + 1):
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N401: one line docstring should not start with"
" a space")
def nova_docstring_one_line(physical_line):
"""Check one line docstring end.
nova HACKING guide recommendation for one line docstring:
A one line docstring looks like this and ends in a period.
N402
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
if (physical_line[-5] != '.'):
return pos, "NOVA N402: one line docstring needs a period"
def nova_docstring_multiline_end(physical_line):
"""Check multi line docstring end.
nova HACKING guide recommendation for docstring:
Docstring should end on a new line
N403
"""
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
if (pos != -1 and len(physical_line) == pos):
print physical_line
if (physical_line[pos + 3] == ' '):
return (pos, "NOVA N403: multi line docstring end on new line")
FORMAT_RE = re.compile("%(?:"
"%|" # Ignore plain percents
"(\(\w+\))?" # mapping key
"([#0 +-]?" # flag
"(?:\d+|\*)?" # width
"(?:\.\d+)?" # precision
"[hlL]?" # length mod
"\w))") # type
class LocalizationError(Exception):
pass
def check_l18n():
"""Generator that checks token stream for localization errors.
Expects tokens to be ``send``ed one by one.
Raises LocalizationError if some error is found.
"""
while True:
try:
token_type, text, _, _, _ = yield
except GeneratorExit:
return
if token_type == tokenize.NAME and text == "_":
while True:
token_type, text, start, _, _ = yield
if token_type != tokenize.NL:
break
if token_type != tokenize.OP or text != "(":
continue # not a localization call
format_string = ''
while True:
token_type, text, start, _, _ = yield
if token_type == tokenize.STRING:
format_string += eval(text)
elif token_type == tokenize.NL:
pass
else:
break
if not format_string:
raise LocalizationError(start,
"NOVA N701: Empty localization string")
if token_type != tokenize.OP:
raise LocalizationError(start,
"NOVA N701: Invalid localization call")
if text != ")":
if text == "%":
raise LocalizationError(start,
"NOVA N702: Formatting operation should be outside"
" of localization method call")
elif text == "+":
raise LocalizationError(start,
"NOVA N702: Use bare string concatenation instead"
" of +")
else:
raise LocalizationError(start,
"NOVA N702: Argument to _ must be just a string")
format_specs = FORMAT_RE.findall(format_string)
positional_specs = [(key, spec) for key, spec in format_specs
if not key and spec]
# not spec means %%, key means %(smth)s
if len(positional_specs) > 1:
raise LocalizationError(start,
"NOVA N703: Multiple positional placeholders")
def nova_localization_strings(logical_line, tokens):
"""Check localization in line.
N701: bad localization call
N702: complex expression instead of string as argument to _()
N703: multiple positional placeholders
"""
gen = check_l18n()
next(gen)
try:
map(gen.send, tokens)
gen.close()
except LocalizationError as e:
return e.args
#TODO(jogo) Dict and list objects
current_file = ""
def readlines(filename):
"""Record the current file being tested."""
pep8.current_file = filename
return open(filename).readlines()
def add_nova():
"""Monkey patch in nova guidelines.
Look for functions that start with nova_ and have arguments
and add them to pep8 module
Assumes you know how to write pep8.py checks
"""
for name, function in globals().items():
if not inspect.isfunction(function):
continue
args = inspect.getargspec(function)[0]
if args and name.startswith("nova"):
exec("pep8.%s = %s" % (name, name))
if __name__ == "__main__":
#include nova path
sys.path.append(os.getcwd())
| |
Task = property(__Task.value, __Task.set, None, None)
# Attribute Name uses Python identifier Name
__Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_Workflow__Name', pyxb.binding.datatypes.string)
__Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 555, 4)
__Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 555, 4)
Name = property(__Name.value, __Name.set, None, None)
_ElementMap.update({
__Task.name() : __Task
})
_AttributeMap.update({
__Name.name() : __Name
})
Namespace.addCategoryObject('typeBinding', u'Workflow', Workflow_)
# Complex type {avm}WorkflowTaskBase with content type EMPTY
class WorkflowTaskBase_ (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {avm}WorkflowTaskBase with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'WorkflowTaskBase')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 557, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute Name uses Python identifier Name
__Name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Name'), 'Name', '__avm_WorkflowTaskBase__Name', pyxb.binding.datatypes.string)
__Name._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 558, 4)
__Name._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 558, 4)
Name = property(__Name.value, __Name.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__Name.name() : __Name
})
Namespace.addCategoryObject('typeBinding', u'WorkflowTaskBase', WorkflowTaskBase_)
# Complex type {avm}Settings with content type EMPTY
class Settings_ (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {avm}Settings with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Settings')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 576, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'Settings', Settings_)
# Complex type {avm}DesignDomainFeature with content type EMPTY
class DesignDomainFeature_ (pyxb.binding.basis.complexTypeDefinition):
"""Complex type {avm}DesignDomainFeature with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DesignDomainFeature')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 588, 2)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'DesignDomainFeature', DesignDomainFeature_)
# Complex type {avm}Value with content type ELEMENT_ONLY
class Value_ (ValueNode_):
"""Complex type {avm}Value with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Value')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 107, 2)
_ElementMap = ValueNode_._ElementMap.copy()
_AttributeMap = ValueNode_._AttributeMap.copy()
# Base type is ValueNode_
# Element ValueExpression uses Python identifier ValueExpression
__ValueExpression = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'ValueExpression'), 'ValueExpression', '__avm_Value__ValueExpression', False, pyxb.utils.utility.Location(u'avm.xsd', 111, 10), )
ValueExpression = property(__ValueExpression.value, __ValueExpression.set, None, None)
# Element DataSource uses Python identifier DataSource
__DataSource = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'DataSource'), 'DataSource', '__avm_Value__DataSource', True, pyxb.utils.utility.Location(u'avm.xsd', 112, 10), )
DataSource = property(__DataSource.value, __DataSource.set, None, None)
# Attribute DimensionType uses Python identifier DimensionType
__DimensionType = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'DimensionType'), 'DimensionType', '__avm_Value__DimensionType', DimensionTypeEnum)
__DimensionType._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 114, 8)
__DimensionType._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 114, 8)
DimensionType = property(__DimensionType.value, __DimensionType.set, None, None)
# Attribute DataType uses Python identifier DataType
__DataType = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'DataType'), 'DataType', '__avm_Value__DataType', DataTypeEnum)
__DataType._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 115, 8)
__DataType._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 115, 8)
DataType = property(__DataType.value, __DataType.set, None, None)
# Attribute Dimensions uses Python identifier Dimensions
__Dimensions = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Dimensions'), 'Dimensions', '__avm_Value__Dimensions', pyxb.binding.datatypes.string)
__Dimensions._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 116, 8)
__Dimensions._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 116, 8)
Dimensions = property(__Dimensions.value, __Dimensions.set, None, None)
# Attribute Unit uses Python identifier Unit
__Unit = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Unit'), 'Unit', '__avm_Value__Unit', pyxb.binding.datatypes.string)
__Unit._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 117, 8)
__Unit._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 117, 8)
Unit = property(__Unit.value, __Unit.set, None, None)
# Attribute ID inherited from {avm}ValueNode
_ElementMap.update({
__ValueExpression.name() : __ValueExpression,
__DataSource.name() : __DataSource
})
_AttributeMap.update({
__DimensionType.name() : __DimensionType,
__DataType.name() : __DataType,
__Dimensions.name() : __Dimensions,
__Unit.name() : __Unit
})
Namespace.addCategoryObject('typeBinding', u'Value', Value_)
# Complex type {avm}FixedValue with content type ELEMENT_ONLY
class FixedValue_ (ValueExpressionType_):
"""Complex type {avm}FixedValue with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'FixedValue')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 121, 2)
_ElementMap = ValueExpressionType_._ElementMap.copy()
_AttributeMap = ValueExpressionType_._AttributeMap.copy()
# Base type is ValueExpressionType_
# Element Value uses Python identifier Value
__Value = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Value'), 'Value', '__avm_FixedValue__Value', False, pyxb.utils.utility.Location(u'avm.xsd', 125, 10), )
Value = property(__Value.value, __Value.set, None, None)
# Attribute Uncertainty uses Python identifier Uncertainty
__Uncertainty = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Uncertainty'), 'Uncertainty', '__avm_FixedValue__Uncertainty', pyxb.binding.datatypes.float)
__Uncertainty._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 127, 8)
__Uncertainty._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 127, 8)
Uncertainty = property(__Uncertainty.value, __Uncertainty.set, None, None)
_ElementMap.update({
__Value.name() : __Value
})
_AttributeMap.update({
__Uncertainty.name() : __Uncertainty
})
Namespace.addCategoryObject('typeBinding', u'FixedValue', FixedValue_)
# Complex type {avm}CalculatedValue with content type ELEMENT_ONLY
class CalculatedValue_ (ValueExpressionType_):
"""Complex type {avm}CalculatedValue with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'CalculatedValue')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 131, 2)
_ElementMap = ValueExpressionType_._ElementMap.copy()
_AttributeMap = ValueExpressionType_._AttributeMap.copy()
# Base type is ValueExpressionType_
# Element Expression uses Python identifier Expression
__Expression = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Expression'), 'Expression', '__avm_CalculatedValue__Expression', False, pyxb.utils.utility.Location(u'avm.xsd', 135, 10), )
Expression = property(__Expression.value, __Expression.set, None, None)
# Attribute Type uses Python identifier Type
__Type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Type'), 'Type', '__avm_CalculatedValue__Type', CalculationTypeEnum, required=True)
__Type._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 137, 8)
__Type._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 137, 8)
Type = property(__Type.value, __Type.set, None, None)
_ElementMap.update({
__Expression.name() : __Expression
})
_AttributeMap.update({
__Type.name() : __Type
})
Namespace.addCategoryObject('typeBinding', u'CalculatedValue', CalculatedValue_)
# Complex type {avm}DerivedValue with content type EMPTY
class DerivedValue_ (ValueExpressionType_):
"""Complex type {avm}DerivedValue with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'DerivedValue')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 141, 2)
_ElementMap = ValueExpressionType_._ElementMap.copy()
_AttributeMap = ValueExpressionType_._AttributeMap.copy()
# Base type is ValueExpressionType_
# Attribute ValueSource uses Python identifier ValueSource
__ValueSource = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'ValueSource'), 'ValueSource', '__avm_DerivedValue__ValueSource', pyxb.binding.datatypes.IDREF, required=True)
__ValueSource._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 144, 8)
__ValueSource._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 144, 8)
ValueSource = property(__ValueSource.value, __ValueSource.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__ValueSource.name() : __ValueSource
})
Namespace.addCategoryObject('typeBinding', u'DerivedValue', DerivedValue_)
# Complex type {avm}ParametricValue with content type ELEMENT_ONLY
class ParametricValue_ (ValueExpressionType_):
"""Complex type {avm}ParametricValue with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ParametricValue')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 196, 2)
_ElementMap = ValueExpressionType_._ElementMap.copy()
_AttributeMap = ValueExpressionType_._AttributeMap.copy()
# Base type is ValueExpressionType_
# Element Default uses Python identifier Default
__Default = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Default'), 'Default', '__avm_ParametricValue__Default', False, pyxb.utils.utility.Location(u'avm.xsd', 200, 10), )
Default = property(__Default.value, __Default.set, None, None)
# Element Maximum uses Python identifier Maximum
__Maximum = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Maximum'), 'Maximum', '__avm_ParametricValue__Maximum', False, pyxb.utils.utility.Location(u'avm.xsd', 201, 10), )
Maximum = property(__Maximum.value, __Maximum.set, None, None)
# Element Minimum uses Python identifier Minimum
__Minimum = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'Minimum'), 'Minimum', '__avm_ParametricValue__Minimum', False, pyxb.utils.utility.Location(u'avm.xsd', 202, 10), )
Minimum = property(__Minimum.value, __Minimum.set, None, None)
# Element AssignedValue uses Python identifier AssignedValue
__AssignedValue = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, u'AssignedValue'), 'AssignedValue', '__avm_ParametricValue__AssignedValue', False, pyxb.utils.utility.Location(u'avm.xsd', 203, 10), )
AssignedValue = property(__AssignedValue.value, __AssignedValue.set, None, None)
_ElementMap.update({
__Default.name() : __Default,
__Maximum.name() : __Maximum,
__Minimum.name() : __Minimum,
__AssignedValue.name() : __AssignedValue
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'ParametricValue', ParametricValue_)
# Complex type {avm}ProbabilisticValue with content type EMPTY
class ProbabilisticValue_ (ValueExpressionType_):
"""Complex type {avm}ProbabilisticValue with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = True
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ProbabilisticValue')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 209, 2)
_ElementMap = ValueExpressionType_._ElementMap.copy()
_AttributeMap = ValueExpressionType_._AttributeMap.copy()
# Base type is ValueExpressionType_
_ElementMap.update({
})
_AttributeMap.update({
})
Namespace.addCategoryObject('typeBinding', u'ProbabilisticValue', ProbabilisticValue_)
# Complex type {avm}SecurityClassification with content type EMPTY
class SecurityClassification_ (DistributionRestriction_):
"""Complex type {avm}SecurityClassification with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'SecurityClassification')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 248, 2)
_ElementMap = DistributionRestriction_._ElementMap.copy()
_AttributeMap = DistributionRestriction_._AttributeMap.copy()
# Base type is DistributionRestriction_
# Attribute Notes inherited from {avm}DistributionRestriction
# Attribute Level uses Python identifier Level
__Level = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Level'), 'Level', '__avm_SecurityClassification__Level', pyxb.binding.datatypes.string)
__Level._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 251, 8)
__Level._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 251, 8)
Level = property(__Level.value, __Level.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__Level.name() : __Level
})
Namespace.addCategoryObject('typeBinding', u'SecurityClassification', SecurityClassification_)
# Complex type {avm}Proprietary with content type EMPTY
class Proprietary_ (DistributionRestriction_):
"""Complex type {avm}Proprietary with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'Proprietary')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 255, 2)
_ElementMap = DistributionRestriction_._ElementMap.copy()
_AttributeMap = DistributionRestriction_._AttributeMap.copy()
# Base type is DistributionRestriction_
# Attribute Notes inherited from {avm}DistributionRestriction
# Attribute Organization uses Python identifier Organization
__Organization = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, u'Organization'), 'Organization', '__avm_Proprietary__Organization', pyxb.binding.datatypes.string, required=True)
__Organization._DeclarationLocation = pyxb.utils.utility.Location(u'avm.xsd', 258, 8)
__Organization._UseLocation = pyxb.utils.utility.Location(u'avm.xsd', 258, 8)
Organization = property(__Organization.value, __Organization.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__Organization.name() : __Organization
})
Namespace.addCategoryObject('typeBinding', u'Proprietary', Proprietary_)
# Complex type {avm}ITAR with content type EMPTY
class ITAR_ (DistributionRestriction_):
"""Complex type {avm}ITAR with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = pyxb.namespace.ExpandedName(Namespace, u'ITAR')
_XSDLocation = pyxb.utils.utility.Location(u'avm.xsd', 262, 2)
_ElementMap = DistributionRestriction_._ElementMap.copy()
_AttributeMap = DistributionRestriction_._AttributeMap.copy()
# Base type is DistributionRestriction_
# Attribute Notes inherited from {avm}DistributionRestriction
def SovStructureDestroyed(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
    structureType = typeidlist[str(fulldetails["structureTypeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
notifyingMessage = (pinger + " Sovereignty Structure Destroyed - [" + timestamp + "]\n" + bolders + "The " + structureType + " In " + systemName + " Has Been Destroyed!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]")
return notifyingMessage
def SovAllClaimAquiredMsg(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
corpDetails = ESI.getCorpData(fulldetails["corpID"])
corpName = corpDetails["name"]
if "alliance_id" in corpDetails:
allianceDetails = ESI.getAllianceData(corpDetails["alliance_id"])
allianceName = allianceDetails["name"]
else:
allianceName = "[No Alliance]"
notifyingMessage = (pinger + " Sovereignty Claim Acquired - [" + timestamp + "]\n" + bolders + "Sovereignty Has Been Acquired In " + systemName + "!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nOwner: " + getLink(corpName, ("http://evemaps.dotlan.net/corp/" + str(fulldetails["corpID"])), bolders) + " [" + getLink(allianceName, ("http://evemaps.dotlan.net/alliance/" + str(corpDetails["alliance_id"])), bolders) + "]")
return notifyingMessage
def SovAllClaimLostMsg(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
corpDetails = ESI.getCorpData(fulldetails["corpID"])
corpName = corpDetails["name"]
if "alliance_id" in corpDetails:
allianceDetails = ESI.getAllianceData(corpDetails["alliance_id"])
allianceName = allianceDetails["name"]
else:
allianceName = "[No Alliance]"
notifyingMessage = (pinger + " Sovereignty Claim Lost - [" + timestamp + "]\n" + bolders + "Sovereignty Has Been Lost In " + systemName + "!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nOwner: " + getLink(corpName, ("http://evemaps.dotlan.net/corp/" + str(fulldetails["corpID"])), bolders) + " [" + getLink(allianceName, ("http://evemaps.dotlan.net/alliance/" + str(corpDetails["alliance_id"])), bolders) + "]")
return notifyingMessage
def SovStructureSelfDestructRequested(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
structureType = typeidlist[str(fulldetails["structureTypeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
requesterString = ESI.getFullCharacterLink(fulldetails["charID"], bolders)
notifyingMessage = (pinger + " Sovereignty Started Self-Destruct - [" + timestamp + "]\n" + bolders + "A Self-Destruct Request Has Been Made For The " + structureType + " In " + systemName + "!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nRequested By: " + requesterString + "\nDestruction Time: " + getRealTime(fulldetails["destructTime"]))
return notifyingMessage
def SovStructureSelfDestructFinished(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
structureType = typeidlist[str(fulldetails["structureTypeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
notifyingMessage = (pinger + " Sovereignty Finished Self-Destruct - [" + timestamp + "]\n" + bolders + "The Self-Destruct Request For The " + structureType + " In " + systemName + " Has Completed!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]")
return notifyingMessage
def SovStructureSelfDestructCancel(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
structureType = typeidlist[str(fulldetails["structureTypeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
cancellerString = ESI.getFullCharacterLink(fulldetails["charID"], bolders)
notifyingMessage = (pinger + " Sovereignty Cancelled Self-Destruct - [" + timestamp + "]\n" + bolders + "The Self-Destruct Request For The " + structureType + " In " + systemName + " Has Been Cancelled!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nCancelled By: " + cancellerString)
return notifyingMessage
def OrbitalAttacked(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
planetDetails = ESI.getPlanetDetails(fulldetails["planetID"])
planetName = planetDetails["name"]
attackerString = ESI.getFullCharacterLink(fulldetails["aggressorID"], bolders)
notifyingMessage = (pinger + " Customs Office Under Attack - [" + timestamp + "]\n" + bolders + "A Customs Office Is Under Attack!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " (" + planetName.replace(systemName, "Planet") + ") [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nAttacker: " + attackerString + "\nHealth: " + str(round((float(fulldetails["shieldLevel"]) * 100), 2)) + "% Shield")
return notifyingMessage
def OrbitalReinforced(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
planetDetails = ESI.getPlanetDetails(fulldetails["planetID"])
planetName = planetDetails["name"]
attackerString = ESI.getFullCharacterLink(fulldetails["aggressorID"], bolders)
notifyingMessage = (pinger + " Customs Office Reinforced - [" + timestamp + "]\n" + bolders + "A Customs Office Has Been Reinforced!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " (" + planetName.replace(systemName, "Planet") + ") [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nAttacker: " + attackerString + "\nComes Out At: " + getRealTime(fulldetails["reinforceExitTime"]))
return notifyingMessage
def TowerAlertMsg(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
posType = typeidlist[str(fulldetails["typeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
moonDetails = ESI.getMoonDetails(fulldetails["moonID"])
moonName = moonDetails["name"]
attackerString = ESI.getFullCharacterLink(fulldetails["aggressorID"], bolders)
notifyingMessage = (pinger + " Tower Under Attack - [" + timestamp + "]\n" + bolders + "A(n) " + posType + " in " + systemName + " Is Under Attack!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " (" + moonName.replace(systemName, "Planet") + ") [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nAttacker: " + attackerString + "\nHealth: " + str(round(float(fulldetails["shieldValue"] * 100), 2)) + "% Shield / " + str(round(float(fulldetails["armorValue"] * 100), 2)) + "% Armor / " + str(round(float(fulldetails["hullValue"] * 100), 2)) + "% Structure")
return notifyingMessage
def TowerResourceAlertMsg(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
posType = typeidlist[str(fulldetails["typeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
moonDetails = ESI.getMoonDetails(fulldetails["moonID"])
moonName = moonDetails["name"]
corpDetails = ESI.getCorpData(fulldetails["corpID"])
corpName = corpDetails["name"]
notifyingMessage = (pinger + " Tower Low On Fuel - [" + timestamp + "]\n" + bolders + "A(n) " + posType + " in " + systemName + " Is Low On Fuel!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " (" + moonName.replace(systemName, "Planet") + ") [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nOwner: " + getLink(corpName, ("http://evemaps.dotlan.net/corp/" + corpName.replace(" ","_")), bolders) + "\nRequired Fuel: \n```\n")
for fuels in fulldetails["wants"]:
notifyingMessage += (typeidlist[str(fuels["typeID"])] + ": " + str(fuels["quantity"]) + " Units Remaining\n")
notifyingMessage += "```"
return notifyingMessage
def AllAnchoringMsg(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken):
posType = typeidlist[str(fulldetails["typeID"])]
systemName = ESI.getSystemName(geographicinformation, str(fulldetails["solarSystemID"]))
regionName = ESI.getRegionName(geographicinformation, str(fulldetails["solarSystemID"]))
moonDetails = ESI.getMoonDetails(fulldetails["moonID"])
moonName = moonDetails["name"]
corpDetails = ESI.getCorpData(fulldetails["corpID"])
corpName = corpDetails["name"]
if "alliance_id" in corpDetails:
allianceDetails = ESI.getAllianceData(corpDetails["alliance_id"])
allianceName = allianceDetails["name"]
anchorerString = (getLink(corpName, ("http://evemaps.dotlan.net/corp/" + str(fulldetails["corpID"])), bolders) + " [" + getLink(allianceName, ("http://evemaps.dotlan.net/alliance/" + str(corpDetails["alliance_id"])), bolders) + "]")
else:
anchorerString = (getLink(corpName, ("http://evemaps.dotlan.net/corp/" + str(fulldetails["corpID"])), bolders))
notifyingMessage = (pinger + " Tower Anchoring - [" + timestamp + "]\n" + bolders + "A(n) " + posType + " Has Begun Anchoring in " + systemName + "!" + bolders + "\nLocation: " + getLink(systemName, ("http://evemaps.dotlan.net/system/" + systemName.replace(" ","_")), bolders) + " (" + moonName.replace(systemName, "Planet") + ") [" + getLink(regionName, ("http://evemaps.dotlan.net/map/" + regionName.replace(" ","_") + "/" + systemName.replace(" ","_")), bolders) + "]\nAnchoring Entity: " + anchorerString)
return notifyingMessage
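# findFunction (below) looks up the message formatter defined above for a given ESI
# notification type string. Hypothetical usage sketch, assuming it returns the matching
# entry (the caller-side variable names here are made up):
#   formatter = findFunction(notification["type"])
#   if formatter is not None:
#       text = formatter(timestamp, fulldetails, typeidlist, geographicinformation, bolders, pinger, accessToken)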
def findFunction(type):
functionList = {
"EntosisCaptureStarted" : EntosisCaptureStarted,
"StructureDestroyed" : StructureDestroyed,
"StructureLostArmor" : StructureLostArmor,
"StructureLostShields" : StructureLostShields,
"StructureUnderAttack" : StructureUnderAttack,
"MoonminingAutomaticFracture" : MoonminingAutomaticFracture,
"MoonminingExtractionCancelled" : MoonminingExtractionCancelled,
"MoonminingExtractionFinished" : MoonminingExtractionFinished,
"MoonminingExtractionStarted" : MoonminingExtractionStarted,
"MoonminingLaserFired" : MoonminingLaserFired,
"StructureAnchoring" : StructureAnchoring,
"StructureFuelAlert" : StructureFuelAlert,
"StructureOnline" : StructureOnline,
"StructureUnanchoring" : StructureUnanchoring,
"StructureServicesOffline" : StructureServicesOffline,
"StructureWentHighPower" : StructureWentHighPower,
"StructureWentLowPower" : StructureWentLowPower,
"StructuresReinforcementChanged" : StructuresReinforcementChanged,
"OwnershipTransferred" : OwnershipTransferred,
"SovCommandNodeEventStarted" : SovCommandNodeEventStarted,
"SovStructureReinforced" : SovStructureReinforced,
"SovStructureDestroyed" : SovStructureDestroyed,
"SovAllClaimAquiredMsg" : SovAllClaimAquiredMsg,
"SovAllClaimLostMsg" : SovAllClaimLostMsg,
"SovStructureSelfDestructRequested" : SovStructureSelfDestructRequested,
"SovStructureSelfDestructFinished" : SovStructureSelfDestructFinished,
"SovStructureSelfDestructCancel" : SovStructureSelfDestructCancel,
"OrbitalAttacked" : OrbitalAttacked,
"OrbitalReinforced" : OrbitalReinforced,
"TowerAlertMsg" | |
    assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test (list of rectangles, shape_type) tuple
shape = (10, 4, 2)
vertices = 20 * np.random.random(shape)
data = (vertices, "rectangle")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
# Test list of (rectangle, shape_type) tuples
data = [(vertices[i], "rectangle") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_rectangles_roundtrip():
"""Test a full roundtrip with rectangles data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
new_layer = Shapes(layer.data)
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_integer_rectangle():
"""Test instantiating rectangles with integer data."""
shape = (10, 2, 2)
np.random.seed(1)
data = np.random.randint(20, size=shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_negative_rectangle():
"""Test instantiating rectangles with negative data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape) - 10
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_empty_rectangle():
"""Test instantiating rectangles with empty data."""
shape = (0, 0, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_3D_rectangles():
"""Test instantiating Shapes layer with 3D planar rectangles."""
# Test a single four corner rectangle
np.random.seed(0)
planes = np.tile(np.arange(10).reshape((10, 1, 1)), (1, 4, 1))
corners = np.random.uniform(0, 10, size=(10, 4, 2))
data = np.concatenate((planes, corners), axis=2)
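    # data has shape (10, 4, 3): each rectangle's four corners share one plane coordinate
    # followed by its 2D in-plane corner coordinates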
layer = Shapes(data)
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 3
assert np.all([s == 'rectangle' for s in layer.shape_type])
def test_ellipses():
"""Test instantiating Shapes layer with a random 2D ellipses."""
# Test a single four corner ellipses
shape = (1, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple four corner ellipses
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test a single ellipse center radii, which gets converted into four
# corner ellipse
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple center radii ellipses
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_ellipses_with_shape_type():
"""Test instantiating ellipses with shape_type in data"""
# Test single four corner (vertices, shape_type) tuple
shape = (1, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test multiple four corner (list of vertices, shape_type) tuple
shape = (10, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = (vertices, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test list of four corner (vertices, shape_type) tuples
shape = (10, 4, 2)
np.random.seed(0)
vertices = 20 * np.random.random(shape)
data = [(vertices[i], "ellipse") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, vertices)])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test single (center-radii, shape_type) ellipse
shape = (1, 2, 2)
np.random.seed(0)
data = (20 * np.random.random(shape), "ellipse")
layer = Shapes(data)
assert layer.nshapes == 1
assert len(layer.data[0]) == 4
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test (list of center-radii, shape_type) tuple
shape = (10, 2, 2)
np.random.seed(0)
center_radii = 20 * np.random.random(shape)
data = (center_radii, "ellipse")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
# Test list of (center-radii, shape_type) tuples
shape = (10, 2, 2)
np.random.seed(0)
center_radii = 20 * np.random.random(shape)
data = [(center_radii[i], "ellipse") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([len(ld) == 4 for ld in layer.data])
assert layer.ndim == shape[2]
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_4D_ellispse():
"""Test instantiating Shapes layer with 4D planar ellipse."""
# Test a single 4D ellipse
np.random.seed(0)
data = [
[
[3, 5, 108, 108],
[3, 5, 108, 148],
[3, 5, 148, 148],
[3, 5, 148, 108],
]
]
layer = Shapes(data, shape_type='ellipse')
assert layer.nshapes == len(data)
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == 4
assert np.all([s == 'ellipse' for s in layer.shape_type])
def test_ellipses_roundtrip():
"""Test a full roundtrip with ellipss data."""
shape = (10, 4, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='ellipse')
new_layer = Shapes(layer.data, shape_type='ellipse')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_lines():
"""Test instantiating Shapes layer with a random 2D lines."""
# Test a single two end point line
shape = (1, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == data[0])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test multiple lines
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
def test_lines_with_shape_type():
"""Test instantiating lines with shape_type"""
# Test (single line, shape_type) tuple
shape = (1, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = (end_points, 'line')
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all(layer.data[0] == end_points[0])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test (multiple lines, shape_type) tuple
shape = (10, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = (end_points, "line")
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
# Test list of (line, shape_type) tuples
shape = (10, 2, 2)
np.random.seed(0)
end_points = 20 * np.random.random(shape)
data = [(end_points[i], "line") for i in range(shape[0])]
layer = Shapes(data)
assert layer.nshapes == shape[0]
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, end_points)])
assert layer.ndim == shape[2]
assert np.all([s == 'line' for s in layer.shape_type])
def test_lines_roundtrip():
"""Test a full roundtrip with line data."""
shape = (10, 2, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Shapes(data, shape_type='line')
new_layer = Shapes(layer.data, shape_type='line')
assert np.all([nd == d for nd, d in zip(new_layer.data, layer.data)])
def test_paths():
"""Test instantiating Shapes layer with a random 2D paths."""
# Test a single path with 6 points
shape = (1, 6, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
| |
        for user, user_connections in six.iteritems(self.connections[project_id]):
if user == '':
# do not collect anonymous connections
continue
for uid, client in six.iteritems(user_connections):
if client.examined_at and client.examined_at + project.get("connection_lifetime", 24*365*3600) < now:
to_return.add(user)
raise Return((to_return, None))
@coroutine
def check_expired_connections(self):
"""
For each project ask web application about users whose connections expired.
Close connections of deactivated users and keep valid users' connections.
"""
projects, error = yield self.structure.project_list()
if error:
raise Return((None, error))
checks = []
for project in projects:
if project.get('connection_check', False):
checks.append(self.check_project_expired_connections(project))
try:
# run all checks in parallel
yield checks
except Exception as err:
logger.error(err)
tornado.ioloop.IOLoop.instance().add_timeout(
time.time()+self.CONNECTION_EXPIRE_CHECK_INTERVAL,
self.check_expired_connections
)
raise Return((True, None))
@coroutine
def check_project_expired_connections(self, project):
now = time.time()
project_id = project['_id']
checked_at = self.expired_connections.get(project_id, {}).get("checked_at")
if checked_at and (now - checked_at < project.get("connection_check_interval", 60)):
raise Return((True, None))
users = self.expired_connections.get(project_id, {}).get("users", {}).copy()
if not users:
raise Return((True, None))
self.expired_connections[project_id]["users"] = set()
expired_reconnect_clients = self.expired_reconnections.get(project_id, [])[:]
self.expired_reconnections[project_id] = []
inactive_users, error = yield self.check_users(project, users)
if error:
raise Return((False, error))
self.expired_connections[project_id]["checked_at"] = now
now = time.time()
clients_to_disconnect = []
if isinstance(inactive_users, list):
            # a list of inactive users received, iterate through connections
# destroy inactive, update active.
if project_id in self.connections:
for user, user_connections in six.iteritems(self.connections[project_id]):
for uid, client in six.iteritems(user_connections):
if client.user in inactive_users:
clients_to_disconnect.append(client)
elif client.user in users:
client.examined_at = now
for client in clients_to_disconnect:
yield client.send_disconnect_message("deactivated")
yield client.close_sock()
if isinstance(inactive_users, list):
# now deal with users waiting for reconnect with expired credentials
for client in expired_reconnect_clients:
is_valid = client.user not in inactive_users
if is_valid:
client.examined_at = now
if client.connect_queue:
yield client.connect_queue.put(is_valid)
else:
yield client.close_sock()
raise Return((True, None))
@staticmethod
@coroutine
def check_users(project, users, timeout=5):
address = project.get("connection_check_address")
if not address:
logger.debug("no connection check address for project {0}".format(project['name']))
            raise Return((None, None))
http_client = AsyncHTTPClient()
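        # POST the user list to the project's check address; the web application is
        # expected to reply with a JSON array of user IDs that are no longer active.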
request = HTTPRequest(
address,
method="POST",
body=urlencode({
'users': json.dumps(list(users))
}),
request_timeout=timeout
)
try:
response = yield http_client.fetch(request)
except Exception as err:
logger.error(err)
raise Return((None, None))
else:
if response.code != 200:
raise Return((None, None))
try:
content = [str(x) for x in json.loads(response.body)]
except Exception as err:
logger.error(err)
raise Return((None, err))
raise Return((content, None))
def add_connection(self, project_id, user, uid, client):
"""
Register new client's connection.
"""
if project_id not in self.connections:
self.connections[project_id] = {}
if user not in self.connections[project_id]:
self.connections[project_id][user] = {}
self.connections[project_id][user][uid] = client
def remove_connection(self, project_id, user, uid):
"""
Remove client's connection
"""
try:
del self.connections[project_id][user][uid]
except KeyError:
pass
if project_id in self.connections and user in self.connections[project_id]:
# clean connections
if self.connections[project_id][user]:
return
try:
del self.connections[project_id][user]
except KeyError:
pass
if self.connections[project_id]:
return
try:
del self.connections[project_id]
except KeyError:
pass
def add_admin_connection(self, uid, client):
"""
Register administrator's connection (from web-interface).
"""
self.admin_connections[uid] = client
def remove_admin_connection(self, uid):
"""
Remove administrator's connection.
"""
try:
del self.admin_connections[uid]
except KeyError:
pass
@coroutine
def get_project(self, project_id):
"""
        Project settings can change during a client's connection.
        Every time we need a project we must extract the actual
        project data from the structure.
"""
project, error = yield self.structure.get_project_by_id(project_id)
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
if not project:
raise Return((None, self.PROJECT_NOT_FOUND))
raise Return((project, None))
def extract_namespace_name(self, channel):
"""
Get namespace name from channel name
"""
if self.NAMESPACE_SEPARATOR in channel:
# namespace:rest_of_channel
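            # e.g. with ":" as the separator, "public:news" yields namespace name "public"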
namespace_name = channel.split(self.NAMESPACE_SEPARATOR, 1)[0]
else:
namespace_name = None
return namespace_name
@coroutine
def get_namespace(self, project, channel):
namespace_name = self.extract_namespace_name(channel)
if not namespace_name:
raise Return((project, None))
namespace, error = yield self.structure.get_namespace_by_name(
project, namespace_name
)
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
if not namespace:
raise Return((None, self.NAMESPACE_NOT_FOUND))
raise Return((namespace, None))
@coroutine
def handle_ping(self, params):
"""
Ping message received.
"""
params['updated_at'] = time.time()
self.nodes[params.get('uid')] = params
@coroutine
def handle_unsubscribe(self, params):
"""
Unsubscribe message received - unsubscribe client from certain channels.
"""
project = params.get("project")
user = params.get("user")
channel = params.get("channel", None)
project_id = project['_id']
# try to find user's connection
user_connections = self.connections.get(project_id, {}).get(user, {})
if not user_connections:
raise Return((True, None))
for uid, connection in six.iteritems(user_connections):
if not channel:
# unsubscribe from all channels
                # iterate over a snapshot of the channel keys, since handle_unsubscribe
                # may modify connection.channels while we loop
                for chan in list(connection.channels):
yield connection.handle_unsubscribe({
"channel": chan
})
else:
# unsubscribe from certain channel
yield connection.handle_unsubscribe({
"channel": channel
})
raise Return((True, None))
@coroutine
def handle_disconnect(self, params):
"""
        Handle disconnect message - sent when a user is deactivated in the web
        application and its connections must be forcibly closed by Centrifuge.
"""
project = params.get("project")
user = params.get("user")
reason = params.get("reason", None)
project_id = project['_id']
# try to find user's connection
user_connections = self.connections.get(project_id, {}).get(user, {})
if not user_connections:
raise Return((True, None))
clients_to_disconnect = []
for uid, client in six.iteritems(user_connections):
clients_to_disconnect.append(client)
for client in clients_to_disconnect:
yield client.send_disconnect_message(reason=reason)
yield client.close_sock(pause=False)
raise Return((True, None))
@coroutine
def handle_update_structure(self, params):
"""
        Update structure message received - the structure changed and another
        node sent us a signal about the update.
"""
result, error = yield self.structure.update()
raise Return((result, error))
# noinspection PyCallingNonCallable
@coroutine
def process_call(self, project, method, params):
"""
Call appropriate method from this class according to specified method.
        Note that all permission checking must be done before calling this method.
"""
handle_func = getattr(self, "process_%s" % method, None)
if handle_func:
result, error = yield handle_func(project, params)
raise Return((result, error))
else:
raise Return((None, self.METHOD_NOT_FOUND))
@coroutine
def publish_message(self, project, message):
"""
Publish event into PUB socket stream
"""
project_id = message['project_id']
channel = message['channel']
namespace, error = yield self.get_namespace(project, channel)
if error:
raise Return((False, error))
if namespace.get('is_watching', False):
# send to admin channel
self.engine.publish_admin_message(message)
# send to event channel
subscription_key = self.engine.get_subscription_key(
project_id, channel
)
# no need in project id when sending message to clients
del message['project_id']
self.engine.publish_message(subscription_key, message)
if namespace.get('history', False):
yield self.engine.add_history_message(
project_id, channel, message,
history_size=namespace.get('history_size'),
history_expire=namespace.get('history_expire', 0)
)
if self.collector:
self.collector.incr('messages')
raise Return((True, None))
@coroutine
def prepare_message(self, project, params, client):
"""
Prepare message before actual publishing.
"""
data = params.get('data', None)
message = {
'project_id': project['_id'],
'uid': uuid.uuid4().hex,
'timestamp': int(time.time()),
'client': client,
'channel': params.get('channel'),
'data': data
}
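        # pre-publish callbacks may transform the message or drop it entirely by returning None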
for callback in self.pre_publish_callbacks:
try:
message = yield callback(message)
except Exception as err:
logger.exception(err)
else:
if message is None:
raise Return((None, None))
raise Return((message, None))
@coroutine
def process_publish(self, project, params, client=None):
"""
Publish message into appropriate channel.
"""
message, error = yield self.prepare_message(
project, params, client
)
if error:
raise Return((False, self.INTERNAL_SERVER_ERROR))
if not message:
# message was discarded
raise Return((False, None))
# publish prepared message
result, error = yield self.publish_message(
project, message
)
if error:
raise Return((False, error))
for callback in self.post_publish_callbacks:
try:
yield callback(message)
except Exception as err:
logger.exception(err)
raise Return((True, None))
@coroutine
def process_history(self, project, params):
"""
Return a list of last messages sent into channel.
"""
project_id = project['_id']
channel = params.get("channel")
data, error = yield self.engine.get_history(project_id, channel)
if error:
raise Return((data, self.INTERNAL_SERVER_ERROR))
raise Return((data, None))
@coroutine
def process_presence(self, project, params):
"""
Return current presence information for channel.
"""
project_id = project['_id']
channel = params.get("channel")
data, error = yield self.engine.get_presence(project_id, channel)
if error:
raise Return((data, self.INTERNAL_SERVER_ERROR))
raise Return((data, None))
@coroutine
def process_unsubscribe(self, project, params):
"""
Unsubscribe user from channels.
"""
params["project"] = project
message = {
'app_id': self.uid,
'method': 'unsubscribe',
'params': params
}
# handle on this node
result, error = yield self.handle_unsubscribe(params)
# send to other nodes
self.engine.publish_control_message(message)
if error:
raise Return((result, self.INTERNAL_SERVER_ERROR))
raise Return((result, None))
@coroutine
def process_disconnect(self, project, params):
"""
        Disconnect user - close all of the user's connections by force.
"""
params["project"] = project
message = {
'app_id': self.uid,
'method': 'disconnect',
'params': params
}
# handle on this node
result, error = yield self.handle_disconnect(params)
# send to other nodes
self.engine.publish_control_message(message)
if error:
raise Return((result, self.INTERNAL_SERVER_ERROR))
raise Return((result, None))
@coroutine
def process_dump_structure(self, project, params):
projects, error = yield self.structure.project_list()
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
namespaces, error = yield self.structure.namespace_list()
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
data = {
"projects": projects,
"namespaces": namespaces
}
raise Return((data, None))
@coroutine
def process_project_list(self, project, params):
projects, error = yield self.structure.project_list()
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
raise Return((projects, None))
@coroutine
def process_project_get(self, project, params):
if not project:
raise Return((None, self.PROJECT_NOT_FOUND))
raise Return((project, None))
@coroutine
def process_project_by_name(self, project, params):
project, error = yield self.structure.get_project_by_name(
params.get("name")
)
if error:
raise Return((None, self.INTERNAL_SERVER_ERROR))
if not project:
raise Return((None, self.PROJECT_NOT_FOUND))
raise Return((project, None))
@coroutine
def process_project_create(self, project, params, error_form=False):
        S1 = self.get_index(date1)
S2 = self.get_index(date2)
print("removing "+str(S1+(self.n_t-S2-1))+" time points")
new_datax = self.x[S1:S2+1,:,:]
new_datay = self.y[S1:S2+1,:,:]
new_dates = self.dates[S1:S2+1]
if type(self.mask) == bool:
# just return the data because it's not masked
new_mask = self.mask
else:
new_mask = self.mask[S1:S2+1,:,:]
self.x = new_datax
self.y = new_datay
self.dates= new_dates
self.mask = new_mask
self.n_t = np.shape(new_datax)[0]
# rebuild yrpd
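        # yrpd maps (year, period) to a time index; entries left at -1 are masked as missing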
self.nyrs = self.dates[-1].year - self.dates[0].year + 1
self.yrpd = np.ma.empty([self.nyrs,self.periods],dtype = int)
self.yrpd[:,:] = -1
if self.periods == 12:
for tt in range(self.n_t):
yr = self.dates[tt].year - self.dates[0].year
mt = self.dates[tt].month - 1
# print(yr,mt,tt)
self.yrpd[yr,mt] = tt
elif self.periods < 12:
for tt in range(self.n_t):
yr = self.dates[tt].year - self.dates[0].year
                mt = int(self.dates[tt].month*self.periods/12) - 1
# print(yr,mt,tt)
self.yrpd[yr,mt] = tt
elif self.periods > 12:
for tt in range(self.n_t):
yr = self.dates[tt].year - self.dates[0].year
# day_of_year = (self.dates[tt] - dt.datetime(self.dates[tt].year,1,1)).days + 1
day_of_year = self.dates[tt].timetuple().tm_yday
mt = int(day_of_year*self.periods/366) - 1
# print(yr,mt,tt)
self.yrpd[yr,mt] = tt
self.yrpd.mask = self.yrpd < 0
print("New vec_data_year, size "+str(self.n_t)+", for "+str(self.nyrs)+" years")
def __getitem__(self,indx):
if (type(indx) == int) or (type(indx) == np.int64):
t_p = indx
m = slice(None)
n = slice(None)
else:
t_p,m,n = indx
if type(self.mask) == bool:
# just return the data because it's not masked
return self.x[t_p,m,n], self.y[t_p,m,n]
else:
temp_x = self.x[t_p,m,n]
temp_y = self.y[t_p,m,n]
temp_mask = self.mask[t_p,m,n]
if type(temp_x) == np.ndarray:
temp_x[temp_mask==False] = np.nan
temp_y[temp_mask==False] = np.nan
return temp_x, temp_y
elif temp_mask:
return temp_x, temp_y
else:
return np.nan, np.nan
def mag(self,indx):
"""
crude getitem for magnitudes
indx = [3,item,thing] to get the bits you want
we cannot pass :
instead of ':' write 'slice(None)'
instead of 'a:b' write 'slice(a,b,None)'
"""
if (type(indx) == int) or (type(indx) == np.int64):
t_p = indx
m = slice(None)
n = slice(None)
else:
t_p,m,n = indx
if type(self.mask) == bool:
# just return the data because it's not masked
return np.hypot(self.x[t_p,m,n], self.y[t_p,m,n])
else:
temp_x = self.x[t_p,m,n]
temp_y = self.y[t_p,m,n]
temp_mask = self.mask[t_p,m,n]
if type(temp_x) == np.ndarray:
temp_x[temp_mask==False] = np.nan
temp_y[temp_mask==False] = np.nan
return np.hypot(temp_x, temp_y)
elif temp_mask:
return np.hypot(temp_x, temp_y)
else:
return np.nan
def mean_series(self,mask = False,year_set = [],time_set = [],method='mean',mult_array=False,magnitude= False):
"""
Mask needs to be 1 for true 0 for false
"""
# check if there's data here already
if self.files:
mask,y0,yE,t0,tE = get_range_mask(self,mask,year_set,time_set)
# using all periods within yrpd including empty
out_listx = []
out_listy = []
d_list = []
dprev = self.dates[0]
for y in range(y0,yE+1):
for p in range(self.periods):
if self.yrpd.mask[y,p]:
out_listx.append(np.nan)
out_listy.append(np.nan)
d_list.append(dprev)
else:
tp = self.yrpd[y,p]
                        tempx = self.x[tp].copy()
                        tempx[mask[tp]==False] = np.nan
                        tempy = self.y[tp].copy()
                        tempy[mask[tp]==False] = np.nan
if type(mult_array) == np.ndarray:
tempx = tempx*mult_array
tempy = tempy*mult_array
if magnitude:
if method=='mean':
out_listx.append(np.nanmean(
np.hypot(tempx,tempy)))
if method=='median':
out_listx.append(np.nanmedian(
np.hypot(tempx,tempy)))
if method=='std':
out_listx.append(np.nanstd(
np.hypot(tempx,tempy)))
else:
if method=='mean':
out_listx.append(np.nanmean(tempx))
out_listy.append(np.nanmean(tempy))
if method=='median':
out_listx.append(np.nanmedian(tempx))
out_listy.append(np.nanmedian(tempy))
if method=='std':
out_listx.append(np.nanstd(tempx))
out_listy.append(np.nanstd(tempy))
d_list.append(self.dates[tp])
dprev = self.dates[tp]
if magnitude:
return d_list, out_listx
else:
return d_list, out_listx, out_listy
    def centile_series(self,centiles,mask = False,year_set = [],time_set = [],method='mean',mult_array=False,magnitude= False):
"""
Mask needs to be 1 for true 0 for false
"""
# check if there's data here already
if self.files:
mask,y0,yE,t0,tE = get_range_mask(self,mask,year_set,time_set)
# using all periods within yrpd including empty
out_listx = []
out_listy = []
d_list = []
dprev = self.dates[0]
for y in range(y0,yE+1):
for p in range(self.periods):
if self.yrpd.mask[y,p]:
                        out_listx.append(np.nan)
                        out_listy.append(np.nan)
d_list.append(dprev)
else:
tp = self.yrpd[y,p]
                        tempx = self.x[tp].copy()
                        tempx[mask[tp]==False] = np.nan
                        tempy = self.y[tp].copy()
                        tempy[mask[tp]==False] = np.nan
if type(mult_array) == np.ndarray:
tempx = tempx*mult_array
tempy = tempy*mult_array
if magnitude:
out_listx.append(np.nanpercentile(
np.hypot(tempx,tempy),centiles))
else:
out_listx.append(np.nanpercentile(tempx,centiles))
out_listy.append(np.nanpercentile(tempy,centiles))
d_list.append(self.dates[tp])
dprev = self.dates[tp]
if magnitude:
return d_list, out_listx
else:
return d_list, out_listx, out_listy
def print_date(self,t,string='auto',year_only=False):
"""
Quickly return a date lable from a given data_year time point
        return format can be overridden by setting string to a datetime string format
        otherwise it is 'auto'
        year_only = True overrides everything and just gives the year
"""
# simply get a date string for a time point
if year_only: # year_only overides
str_option = '%Y'
elif string=='auto':
# auto generate the strftime option from no. of periods
# if periods = 4 then year + JFM etc...
# Add this option later use yrpd to find quarter
# manually set JFM etc
# if periods < 12 then months only
if self.periods <= 12:
str_option = '%Y-%m'
elif self.periods <= 366:
str_option = '%Y-%m-%d'
# longer then days too
else:
str_option = '%Y-%m-%d-T%H'
else:
str_option = string
return self.dates[t].strftime(str_option)
def build_mask(self):
"""
fills the mask array with ones if it isn't there yet
"""
if type(self.mask) == bool:
self.mask = np.ones(self.x.shape,dtype=bool)
def build_static_mask(self,mask,points = False,overwrite=False):
"""
        Makes a static 2d mask for all data
        or only the time points listed in the points option
        mask is 1/True for good, nan/False for bad
        overwrite = True, makes the mask identical to the input mask
        overwrite = False, combines the input with the current mask
        if you want to make a temporal mask for a condition
        do it yourself with logical indexing
ie.
DY.build_mask()
DY.mask[DY.data > limit] = 0
will temporarily mask out all data
over the limit
"""
if type(self.mask) == bool:
self.build_mask()
if (type(mask[0,0])!=bool) and (type(mask[0,0])!=np.bool_):
print('mask needs to be binary, not',type(mask[0,0]))
return
if type(points) == bool:
points = np.arange(0,self.n_t,dtype=int)
if overwrite:
temp_mask = np.ones_like(self.x,dtype=bool)
for tt in points:
temp_mask[tt][mask==False] = False
self.mask = temp_mask
else:
for tt in points:
self.mask[tt][mask==False] = False
def append(self,date,datax,datay):
# check if there's data here already
if ~self.files:
print("nothing here, so can't append")
return False
# check the new data is the correct size
m_check,n_check = np.shape(datax)
        if m_check==self.m and n_check==self.n:
# append the data
self.x = np.append(self.x,np.expand_dims(datax,axis=0),axis = 0)
self.y = np.append(self.y,np.expand_dims(datay,axis=0),axis = 0)
self.dates.append(date)
# find the final entry in the yrpd
loc = np.where(self.yrpd == self.n_t)
# add the next entry(ies)
if loc[1][0] == self.periods - 1:
# add new rows if needed - keep the yrmth consistent
self.yrpd = np.ma.append(self.yrpd,
np.ma.masked_values(np.ones([1,self.periods]),1),axis=0)
self.yrpd[-1,0] = self.n_t + 1
else:
self.yrpd[-1,loc[1][0]] = self.n_t + 1
self.n_t += 1
return True
        else: return False
# adds another time slice to the data, and sorts out the yrpd array
def clim_map(self,periods,mask = False,magnitude = False,year_set = [],time_set = []):
"""
periods is the list of period no.s to use in the map
ie. periods = [0,1,2] with give the map of average over
the first three months of a monthly data_year
setting magnitude = True, takes the average of the vector
hypot, rather than the average of each component
"""
# check if there's data here already
if self.files:
mask,y0,yE,t0,tE = get_range_mask(self,mask,year_set,time_set)
if len(periods) == 0: periods = np.arange(0,self.periods)
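            # idx collects, for each requested period, the unmasked time indices across the year range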
idx = [self.yrpd[y0:yE+1,mn].compressed() for mn in periods]
temp_mask = np.sum([mask[j,:,:]
for i in idx for j in i if j>=t0 and j<=tE],axis = 0)
if magnitude:
temp_x = np.nanmean(
[np.hypot(self.x[j],self.y[j]) for i in idx for j in i if j>=t0 and j<=tE],
axis = 0)
temp_x[temp_mask==False] = np.nan
return temp_x
else:
temp_x = np.nanmean(
[self.x[j] for i in idx for j in i if j>=t0 and j<=tE],
axis = 0)
temp_y = np.nanmean(
[self.y[j] for i in idx for j in i if j>=t0 and j<=tE],
axis = 0)
temp_x[temp_mask==False] = np.nan
temp_y[temp_mask==False] = np.nan
return temp_x,temp_y
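# Usage sketch (illustrative): Jan-Mar climatology maps from a monthly data_year,
# either per component or as vector magnitude:
#   u_clim, v_clim = DY.clim_map([0, 1, 2])
#   speed_clim = DY.clim_map([0, 1, 2], magnitude=True)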
# check if there's data here already
def clim_mean(self,mask = False,magnitude = False,year_set = [],time_set = [],method='mean',first_p = 0):
"""
Mask needs to be 1/True for good data and 0/False for bad
"""
# check if there's data here already
if self.files:
mask,y0,yE,t0,tE = get_range_mask(self,mask,year_set,time_set)
temp_x = np.empty([self.periods])
temp_y = np.empty([self.periods])
if magnitude:
for mn in range(self.periods):
idx = self.yrpd[y0:yE+1,mn].compressed()
t_mn = np.sum((idx>=t0)&(idx<=tE))
temp_x1 = np.empty([t_mn,self.m,self.n])
temp_y1 = np.empty([t_mn,self.m,self.n])
temp_mask = np.empty([t_mn,self.m,self.n],dtype=bool)
temp_x1[:,:,:] = [self.x[i] for i in idx if i>=t0 and i<=tE]
temp_y1[:,:,:] = [self.y[i] for i in idx if i>=t0 and i<=tE]
temp_mask[:,:,:] = [self.mask[i] for i in idx if i>=t0 and i<=tE]
temp = np.hypot(temp_x1,temp_y1)
temp[temp_mask==False]
""" main file to create an index from the the begining """
import json
import logging
import logging.config
import multiprocessing
import os
import tempfile
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import faiss
import fire
import fsspec
import numpy as np
import pandas as pd
from autofaiss.external.build import (
create_index,
estimate_memory_required_for_index_creation,
get_estimated_construction_time_infos,
)
from autofaiss.external.optimize import (
get_optimal_hyperparameters,
get_optimal_index_keys_v2,
optimize_and_measure_index,
optimize_and_measure_indices,
)
from autofaiss.external.scores import compute_fast_metrics, compute_medium_metrics
from autofaiss.indices.index_utils import set_search_hyperparameters
from autofaiss.readers.embeddings_iterators import get_file_list, make_path_absolute, read_total_nb_vectors_and_dim
from autofaiss.utils.cast import cast_bytes_to_memory_string, cast_memory_to_bytes
from autofaiss.utils.decorators import Timeit
logger = logging.getLogger("autofaiss")
def _log_output_dict(infos: Dict):
logger.info("{")
for key, value in infos.items():
logger.info(f"\t{key}: {value}")
logger.info("}")
def setup_logging(logging_level: int):
"""Setup the logging."""
logging.config.dictConfig(dict(version=1, disable_existing_loggers=False))
logging_format = "%(asctime)s [%(levelname)s]: %(message)s"
logging.basicConfig(level=logging_level, format=logging_format)
def build_index(
embeddings: Union[str, np.ndarray, List[str]],
index_path: Optional[str] = "knn.index",
index_infos_path: Optional[str] = "index_infos.json",
ids_path: Optional[str] = None,
save_on_disk: bool = True,
file_format: str = "npy",
embedding_column_name: str = "embedding",
id_columns: Optional[List[str]] = None,
index_key: Optional[str] = None,
index_param: Optional[str] = None,
max_index_query_time_ms: float = 10.0,
max_index_memory_usage: str = "16G",
current_memory_available: str = "32G",
use_gpu: bool = False,
metric_type: str = "ip",
nb_cores: Optional[int] = None,
make_direct_map: bool = False,
should_be_memory_mappable: bool = False,
distributed: Optional[str] = None,
temporary_indices_folder: str = "hdfs://root/tmp/distributed_autofaiss_indices",
verbose: int = logging.INFO,
nb_indices_to_keep: int = 1,
) -> Union[Tuple[Optional[Any], Optional[Dict[str, Union[str, float, int]]]], Dict[str, Dict]]:
"""
Reads embeddings and creates a quantized index from them.
The index is stored on the current machine at the given output path.
Parameters
----------
embeddings : Union[str, np.ndarray, List[str]]
Local path containing all preprocessed vectors and cached files.
This could be a single directory or multiple directories.
Files will be added if empty.
Or directly the Numpy array of embeddings
index_path: Optional(str)
Destination path of the quantized model.
index_infos_path: Optional(str)
Destination path of the metadata file.
ids_path: Optional(str)
Only useful when id_columns is not None and file_format=`parquet`.
This will be the path (in any filesystem)
where the mapping files Ids->vector index will be stored in parquet format
save_on_disk: bool
Whether to save the index on disk, default to True.
file_format: Optional(str)
npy or parquet ; default npy
embedding_column_name: Optional(str)
embeddings column name for parquet ; default embedding
id_columns: Optional(List[str])
Can only be used when file_format=`parquet`.
In this case these are the names of the columns containing the Ids of the vectors,
and separate files will be generated to map these ids to indices in the KNN index ;
default None
index_key: Optional(str)
Optional string to give to the index factory in order to create the index.
If None, an index is chosen based on a heuristic.
index_param: Optional(str)
Optional string with hyperparameters to set on the index.
If None, the hyper-parameters are chosen based on a heuristic.
max_index_query_time_ms: float
Bound on the query time for KNN search, this bound is approximative
max_index_memory_usage: str
Maximum size allowed for the index, this bound is strict
current_memory_available: str
Memory available on the machine creating the index, having more memory is a boost
because it reduces the swapping between RAM and disk.
use_gpu: bool
Experimental, gpu training is faster, not tested so far
metric_type: str
Similarity function used for query:
- "ip" for inner product
- "l2" for euclidian distance
nb_cores: Optional[int]
Number of cores to use. Will try to guess the right number if not provided
make_direct_map: bool
Create a direct map allowing reconstruction of embeddings. This is only needed for IVF indices.
Note that this might increase the RAM usage (approximately 8GB for 1 billion embeddings)
should_be_memory_mappable: bool
If set to true, the created index will be selected only among the indices that can be memory-mapped on disk.
This makes it possible to use 50GB indices on a machine with only 1GB of RAM. Default to False
distributed: Optional[str]
If "pyspark", create the indices using pyspark.
Only "parquet" file format is supported.
temporary_indices_folder: str
Folder to save the temporary small indices that are generated by each spark executor.
Only used when distributed = "pyspark".
verbose: int
set verbosity of outputs via logging level, default is `logging.INFO`
nb_indices_to_keep: int
Number of indices to keep at most when distributed is "pyspark".
It allows you to build an index larger than `current_memory_available`.
If it is not equal to 1,
- You are expected to have at most `nb_indices_to_keep` indices with the following names:
"{index_path}i" where i ranges from 1 to `nb_indices_to_keep`
- `build_index` returns a mapping from index path to metrics instead of a tuple (index, metrics)
Default to 1.
"""
setup_logging(verbose)
if index_path is not None:
index_path = make_path_absolute(index_path)
elif save_on_disk:
logger.error("Please specify a index_path if you set save_on_disk as True")
return None, None
if index_infos_path is not None:
index_infos_path = make_path_absolute(index_infos_path)
elif save_on_disk:
logger.error("Please specify a index_infos_path if you set save_on_disk as True")
return None, None
if ids_path is not None:
ids_path = make_path_absolute(ids_path)
if nb_indices_to_keep < 1:
logger.error("Please specify nb_indices_to_keep an integer value larger or equal to 1")
return None, None
elif nb_indices_to_keep > 1 and distributed is None:
logger.error('nb_indices_to_keep can only be larger than 1 when distributed is "pyspark"')
return None, None
current_bytes = cast_memory_to_bytes(current_memory_available)
max_index_bytes = cast_memory_to_bytes(max_index_memory_usage)
memory_left = current_bytes - max_index_bytes
if nb_indices_to_keep == 1 and memory_left < current_bytes * 0.1:
logger.error(
"You do not have enough memory to build this index, "
"please increase current_memory_available or decrease max_index_memory_usage"
)
return None, None
if nb_cores is None:
nb_cores = multiprocessing.cpu_count()
logger.info(f"Using {nb_cores} omp threads (processes), consider increasing --nb_cores if you have more")
faiss.omp_set_num_threads(nb_cores)
if isinstance(embeddings, np.ndarray):
tmp_dir_embeddings = tempfile.TemporaryDirectory()
np.save(os.path.join(tmp_dir_embeddings.name, "emb.npy"), embeddings)
embeddings_path = tmp_dir_embeddings.name
else:
embeddings_path = embeddings # type: ignore
with Timeit("Launching the whole pipeline"):
with Timeit("Reading total number of vectors and dimension"):
_, embeddings_file_paths = get_file_list(path=embeddings_path, file_format=file_format)
nb_vectors, vec_dim, file_counts = read_total_nb_vectors_and_dim(
embeddings_file_paths, file_format=file_format, embedding_column_name=embedding_column_name
)
embeddings_file_paths, file_counts = zip( # type: ignore
*((fp, count) for fp, count in zip(embeddings_file_paths, file_counts) if count > 0)
)
embeddings_file_paths = list(embeddings_file_paths)
file_counts = list(file_counts)
logger.info(f"There are {nb_vectors} embeddings of dim {vec_dim}")
with Timeit("Compute estimated construction time of the index", indent=1):
for log_lines in get_estimated_construction_time_infos(nb_vectors, vec_dim, indent=2).split("\n"):
logger.info(log_lines)
with Timeit("Checking that your have enough memory available to create the index", indent=1):
necessary_mem, index_key_used = estimate_memory_required_for_index_creation(
nb_vectors, vec_dim, index_key, max_index_memory_usage, make_direct_map, nb_indices_to_keep
)
logger.info(
f"{cast_bytes_to_memory_string(necessary_mem)} of memory "
"will be needed to build the index (more might be used if you have more)"
)
prefix = "(default) " if index_key is None else ""
if necessary_mem > cast_memory_to_bytes(current_memory_available):
r = (
f"The current memory available on your machine ({current_memory_available}) is not "
f"enough to create the {prefix}index {index_key_used} that requires "
f"{cast_bytes_to_memory_string(necessary_mem)} to train. "
"You can decrease the number of clusters of you index since the Kmeans algorithm "
"used for clusterisation is responsible for this high memory usage."
"Consider increasing the options current_memory_available or decreasing max_index_memory_usage"
)
logger.error(r)
return None, None
if index_key is None:
with Timeit("Selecting most promising index types given data characteristics", indent=1):
best_index_keys = get_optimal_index_keys_v2(
nb_vectors,
vec_dim,
max_index_memory_usage,
make_direct_map=make_direct_map,
should_be_memory_mappable=should_be_memory_mappable,
use_gpu=use_gpu,
)
if not best_index_keys:
return None, None
index_key = best_index_keys[0]
if id_columns is not None:
logger.info(f"Id columns provided {id_columns} - will be reading the corresponding columns")
if ids_path is not None:
logger.info(f"\tWill be writing the Ids DataFrame in parquet format to {ids_path}")
fs, _ = fsspec.core.url_to_fs(ids_path)
if fs.exists(ids_path):
fs.rm(ids_path, recursive=True)
fs.mkdirs(ids_path)
else:
logger.error(
"\tAs ids_path=None - the Ids DataFrame will not be written and will be ignored subsequently"
)
logger.error("\tPlease provide a value ids_path for the Ids to be written")
def write_ids_df_to_parquet(ids: pd.DataFrame, batch_id: int):
filename = f"part-{batch_id:08d}-{uuid.uuid1()}.parquet"
output_file = os.path.join(ids_path, filename) # type: ignore
with fsspec.open(output_file, "wb") as f:
logger.debug(f"Writing id DataFrame to file {output_file}")
ids.to_parquet(f)
with Timeit("Creating the index", indent=1):
index, indices_folder = create_index(
embeddings_file_paths,
index_key,
metric_type,
nb_vectors,
current_memory_available,
use_gpu=use_gpu,
file_format=file_format,
embedding_column_name=embedding_column_name,
id_columns=id_columns,
embedding_ids_df_handler=write_ids_df_to_parquet if ids_path and id_columns else None,
make_direct_map=make_direct_map,
distributed=distributed,
temporary_indices_folder=temporary_indices_folder,
file_counts=file_counts if distributed is not None else None,
nb_indices_to_keep=nb_indices_to_keep,
)
if nb_indices_to_keep > 1:
indices_folder = cast(str, indices_folder)
index_path2_metric_infos = optimize_and_measure_indices(
indices_folder,
embedding_column_name,
embeddings_file_paths,
file_format,
index_infos_path,
index_key,
index_param,
index_path,
max_index_query_time_ms,
save_on_disk,
use_gpu,
)
for path,
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
<NAME>, <NAME>, <NAME>
Sandia National Laboratories
December 12, 2019
Sudoku Board and Cell data structures.
"""
# import json
import uuid
import config_data
import copy
import logging
logger = logging.getLogger(__name__)
class Cell():
"""
A single cell on a Sudoku board.
Each cell tracks and provides manipulation for the set of
candidate values that the Cell may take.
"""
# An ordered list of display characters used to render cell values.
display_list = ['1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G']
def __init__(self, identifier, value='.', degree=3):
""" Initializes a Cell with an identifier and valueset or another cell.
Args:
identifier : a string identifier within a board (value expected),
or a cell to copy
value : the collection of potential values, or '.' for complete value set
degree (int) : the number of blocks on a side (determines values)
Returns:
Cell : a new cell.
Fields:
propagated (boolean) : True if the cell assignment has been propagated to
sister cells (i.e., removing values from that cell)
Identifier parameter must be either a string or a Cell.
NOTE: the value set is copied via set.copy(), which is fine for
ints and strs, but won't work if the values become some other
complex object type.
"""
if isinstance(identifier, Cell):
# If identifier is a Cell, make a copy
self._id = identifier._id
assert isinstance(self._id, str), "Cell's id should be a string"
self._propagated = identifier._propagated
assert isinstance(self._propagated, bool), \
"Cell's _propagated should be a boolean"
self._values = sorted(list(identifier._values))
self._degree = identifier._degree
else:
# If identifier is a unique ID (str or int)
self._id = identifier
self._propagated = False
self._degree = degree
if (value == '0' or value == '.'):
# Get all possible values
self._values = sorted(Cell.getPossibleValuesByDegree(degree))
elif isinstance(value, str):
self._values = [self.getValueDisplays(
self._degree).index(value)]
elif isinstance(value, list):
self._values = sorted((value))
elif isinstance(value, int):
self._values = [value]
assert isinstance(self._values, list), "Cell's values should be a list"
@ classmethod
def getPossibleValuesByDegree(cls, degree=3):
""" Returns sorted list of all possible values for puzzle of degree.
Raises:
TypeError if degree is not squareable
"""
try:
return [x for x in range(degree ** 2)]
except TypeError:
assert False, "Cell's degree must be square-able (**2)"
@ classmethod
def getValueDisplays(cls, degree=3):
""" Returns sorted list of all display values for puzzle of degree.
Note: this could be canonicalized to save memory, but it isn't.
"""
return [cls.display_list[idx] for idx in cls.getPossibleValuesByDegree(degree)]
@ classmethod
def displayValues(cls, values):
""" Returns list of displays of the sorted list of values.
"""
return sorted([cls.display_list[idx] for idx in values])
@ classmethod
def displayValue(cls, val):
""" Returns displays of the value val.
"""
return cls.display_list[val]
def __str__(self):
return 'Cell(ID=' + str(self._id) + \
', Propagated=' + str(self._propagated) + \
', ValueSet={' + self.getStateStr(True) + '})'
def assign(self, value):
"""
Assigns value to self, removing all other candidates.
Args:
value : the only value this cell can take on
Returns:
boolean : True if other values were eliminated by this assignment
False if no cell update occurred
Raises:
AssertionError : if value was not a valid possibility
"""
assert value in self._values, \
"Cannot assign %s to Cell %s" % (
str(value), str(self.getIdentifier()))
if len(self._values) > 1:
self._values = [value]
return True
return False
def exclude(self, value):
"""
Remove value from self's set of candidate values.
Return True if the value was present, False otherwise.
"""
try:
self._values.remove(value)
return True
except ValueError:
return False
def getCertainValue(self):
"""
If cell has only one candidate value, return it
Otherwise return None
"""
if(len(self._values) == 1):
return self._values[0]
return None
def getIdentifier(self):
""" Return identifier for self. """
return self._id
def getStateStr(self, uncertain=False, goal_cell = None):
"""
If uncertain is False, then return value if Cell is certain
otherwise return '.'
If uncertain is True, return the value set as a string
"""
displays = Cell.getValueDisplays(self._degree)
width = sum([len(x) for x in displays]) + 1
if(uncertain):
s = [displays[val] for val in self.getValues()]
if not s:
# Over-constrained: highlight a conflict
return str.center('X', width)
s = str(''.join(s))
if goal_cell == self.getIdentifier():
# Highlight the goal cell
s = '*' + s + '*'
elif goal_cell and goal_cell[1] == self.getIdentifier()[1]:
# Save space to match with the goal cell
s = ' ' + s + ' '
return str.center(''.join(s), width)
elif self.isCertain():
return displays[self.getCertainValue()] + ' '
else:
return '. '
def getValues(self):
""" Return ordered list of current possible values. """
return sorted(self._values)
def getValueSet(self):
""" Return set of current possible values. """
return set(self._values)
def hasValue(self, value):
""" Return True iff value is possible in this Cell. """
return value in self._values
def isCertain(self):
""" Return True iff this Cell has only one possible value. """
return len(self._values) == 1
def isOverConstrained(self):
""" Return True iff this Cell has no possible values remaining. """
return len(self._values) == 0
def isPropagated(self):
return self._propagated
def setPropagated(self):
self._propagated = True
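# Usage sketch (illustrative): candidate tracking on a standard degree-3 cell.
#   c = Cell('A1')        # starts with all nine candidate values 0..8
#   c.exclude(0)          # drop one candidate
#   c.assign(4)           # collapse to a single value
#   c.isCertain()         # -> True
#   Cell.displayValue(4)  # -> '5' (display_list is 0-indexed)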
# -----------------------------------------------------
class Board():
"""
A single Sudoku board that maintains the current uncertainty state.
A Board is an associative memory of cells indexed by location
(e.g. A1, D3, etc.)
Boards merely maintain state and answer questions about the state,
see Unit and Solver for manipulation methods.
"""
unit_defns = {}
unit_map = {}
@ classmethod
def getCellUnits(cls, cell_id, degree=3):
""" Return units associated with cell_id in a puzzle of degree.
cell_id may be a string or a Cell.
"""
if not isinstance(cell_id, str):
cell_id = cell_id.getIdentifier()
return cls.unit_map[degree][cell_id]
@ classmethod
def getUnitCells(cls, unit_id, degree=3):
""" Return cells associated with unit_id in a puzzle of degree. """
return cls.unit_defns[degree][unit_id]
@ classmethod
def getAllCells(cls, degree=3):
""" Get all cell names in a puzzle of degree. """
return cls.unit_map[degree].keys()
@ classmethod
def getAllUnits(cls, degree=3):
""" Get all unit names in a puzzle of degree. """
return cls.unit_defns[degree].keys()
@ classmethod
def getSortedRows(cls, degree=3):
""" Get all unit names in a puzzle of degree. """
return sorted([name for name in filter(lambda x: cls.getUnitType(x) == 'row',
cls.unit_defns[degree].keys())])
@ classmethod
def getUnitType(cls, unit):
""" Get the type of the unit.
Note: previously we kept collections identifying the unit names,
which was cleaner, but for now we're relying on the
encoding in the unit name.
"""
if unit[0] == 'c':
return 'column'
elif unit[0] == 'r':
return 'row'
elif unit[0] == 'b':
return 'box'
else:
return 'Invalid Input'
@ classmethod
def getAssociatedCellIds(cls, cell_id):
"""
Get all cell IDs in units associated with
target cell, without repeats.
Return empty list if no cell_id is given
"""
associated_cells = []
if cell_id:
associated_units = cls.getCellUnits(cell_id)
for unit_id in associated_units:
unit_cells = cls.getUnitCells(unit_id)
for unit_cell in unit_cells:
if unit_cell not in associated_cells and unit_cell != cell_id:
associated_cells.append(unit_cell)
return associated_cells
@ classmethod
def getCommonCells(cls, cell_id_list):
"""
Get list of all cells jointly associated
to all cells in the list
"""
common_cell_set = set(cls.getAssociatedCellIds(cell_id_list[0]))
for cell_id in cell_id_list:
common_cell_set = common_cell_set & set(
cls.getAssociatedCellIds(cell_id))
return list(common_cell_set)
@ classmethod
def getCommonUnits(cls, cell_id_list):
"""
Get list of all units jointly associated
to all cells in the list
"""
common_unit_set = set(cls.getCellUnits(cell_id_list[0]))
for cell_id in cell_id_list:
common_unit_set = common_unit_set & set(cls.getCellUnits(cell_id))
return list(common_unit_set)
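# Usage sketch (illustrative, assuming the degree-3 unit maps have been initialised
# and unit names follow the 'rX'/'cY'/'bZ' pattern implied by getUnitType):
#   Board.getCommonUnits(['A1', 'A2'])   # units shared by both cells (their row, and box if any)
#   Board.getUnionUnitSet(['A1', 'A2'])  # every unit touching either cell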
@ classmethod
def getUnionUnitSet(cls, cell_id_list):
"""
Get union set of units for the given cells
"""
union_unit_set = set(cls.getCellUnits(cell_id_list[0]))
for cell_id in cell_id_list:
union_unit_set = union_unit_set | set(cls.getCellUnits(cell_id))
return union_unit_set
@ classmethod
def getCellID(cls, row, col):
""" Returns cell identifier given row and column identifier strings. """
r = row[1]
c = col[1:]
return r + c
@ classmethod
def getCellIDFromArrayIndex(cls, row, col):
""" Returns cell identifier given row and column integer. """
rnm = cls._rname(row)
cnm = cls._cname(col)
return cls.getCellID(rnm, cnm)
@ classmethod
def getBoxID(cls, row, col, deg):
""" Returns box identifier given row ('rX') and column ('cY[Y]') identifier and puzzle degree. """
r = row[1]
c = col[1:]
# 1. Convert r back to int past 0 ('A' is 1)
# 2. Bump up to next round for divide to | |
using an exec based plugin.
:param flocker: Flocker represents a Flocker volume attached to a kubelet's host \
machine and exposed to the pod for its usage. This depends on the Flocker \
control service being running
:param gce_persistent_disk: GCEPersistentDisk represents a GCE Disk resource that \
is attached to a kubelet's host machine and then exposed to the pod. \
Provisioned by an admin. More info: \
https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
:param glusterfs: Glusterfs represents a Glusterfs volume that is attached to a \
host and exposed to the pod. Provisioned by an admin. More info: \
https://examples.k8s.io/volumes/glusterfs/README.md
:param iscsi: ISCSI represents an ISCSI Disk resource that is attached to a \
kubelet's host machine and then exposed to the pod. Provisioned by an admin.
:param local: Local represents directly-attached storage with node affinity
:param mount_options: A list of mount options, e.g. ["ro", "soft"]. Not validated - \
mount will simply fail if one is invalid. More info: \
https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
:param nfs: NFS represents an NFS mount on the host. Provisioned by an admin. More \
info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
:param node_affinity: NodeAffinity defines constraints that limit what nodes this \
volume can be accessed from. This field influences the scheduling of pods that \
use this volume.
:param persistent_volume_reclaim_policy: What happens to a persistent volume when \
released from its claim. Valid options are Retain (default for manually \
created PersistentVolumes), Delete (default for dynamically provisioned \
PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the \
volume plugin underlying this PersistentVolume. More info: \
https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
:param photon_persistent_disk: PhotonPersistentDisk represents a PhotonController \
persistent disk attached and mounted on kubelets host machine
:param portworx_volume: PortworxVolume represents a portworx volume attached and \
mounted on kubelets host machine
:param quobyte: Quobyte represents a Quobyte mount on the host that shares a pod's \
lifetime
:param rbd: RBD represents a Rados Block Device mount on the host that shares a \
pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md
:param scale_io: ScaleIO represents a ScaleIO persistent volume attached and \
mounted on Kubernetes nodes.
:param storage_class_name: Name of StorageClass to which this persistent volume \
belongs. Empty value means that this volume does not belong to any \
StorageClass.
:param storageos: StorageOS represents a StorageOS volume that is attached to the \
kubelet's host machine and mounted into the pod More info: \
https://examples.k8s.io/volumes/storageos/README.md
:param volume_mode: volumeMode defines if a volume is intended to be used with a \
formatted filesystem or to remain in raw block state. Value of Filesystem is \
implied when not included in spec.
:param vsphere_volume: VsphereVolume represents a vSphere volume attached and \
mounted on kubelets host machine
"""
def __init__(
self,
access_modes: List[str],
capacity: Optional[dict] = None,
host_path: Optional[HostPathVolumeSource] = None,
aws_elastic_block_store: Optional[AWSElasticBlockStoreVolumeSource] = None,
azure_disk: Optional[AzureDiskVolumeSource] = None,
azure_file: Optional[AzureFilePersistentVolumeSource] = None,
cephfs: Optional[CephFSPersistentVolumeSource] = None,
cinder: Optional[CinderPersistentVolumeSource] = None,
claim_ref: Optional[ObjectReference] = None,
csi: Optional[CSIPersistentVolumeSource] = None,
fc: Optional[FCVolumeSource] = None,
flex_volume: Optional[FlexPersistentVolumeSource] = None,
flocker: Optional[FlockerVolumeSource] = None,
gce_persistent_disk: Optional[GCEPersistentDiskVolumeSource] = None,
glusterfs: Optional[GlusterfsPersistentVolumeSource] = None,
iscsi: Optional[ISCSIPersistentVolumeSource] = None,
local: Optional[LocalVolumeSource] = None,
mount_options: Optional[List[str]] = None,
nfs: Optional[NFSVolumeSource] = None,
node_affinity: Optional[VolumeNodeAffinity] = None,
persistent_volume_reclaim_policy: Optional[str] = None,
photon_persistent_disk: Optional[PhotonPersistentDiskVolumeSource] = None,
portworx_volume: Optional[PortworxVolumeSource] = None,
quobyte: Optional[QuobyteVolumeSource] = None,
rbd: Optional[RBDPersistentVolumeSource] = None,
scale_io: Optional[ScaleIOPersistentVolumeSource] = None,
storage_class_name: Optional[str] = None,
storageos: Optional[StorageOSPersistentVolumeSource] = None,
volume_mode: Optional[str] = None,
vsphere_volume: Optional[VsphereVirtualDiskVolumeSource] = None,
):
self.accessModes = access_modes
self.capacity = capacity
self.hostPath = host_path
self.awsElasticBlockStore = aws_elastic_block_store
self.azureDisk = azure_disk
self.azureFile = azure_file
self.cephfs = cephfs
self.cinder = cinder
self.claimRef = claim_ref
self.csi = csi
self.fc = fc
self.flexVolume = flex_volume
self.flocker = flocker
self.gcePersistentDisk = gce_persistent_disk
self.glusterfs = glusterfs
self.iscsi = iscsi
self.local = local
self.mountOptions = mount_options
self.nfs = nfs
self.nodeAffinity = node_affinity
self.persistentVolumeReclaimPolicy = persistent_volume_reclaim_policy
self.photonPersistentDisk = photon_persistent_disk
self.portworxVolume = portworx_volume
self.quobyte = quobyte
self.rbd = rbd
self.scaleIO = scale_io
self.storageClassName = storage_class_name
self.storageos = storageos
self.volumeMode = volume_mode
self.vsphereVolume = vsphere_volume
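# Illustrative sketch: a minimal ReadWriteOnce NFS-backed volume spec. The
# NFSVolumeSource argument names below are assumptions for the example only.
#   spec = PersistentVolumeSpec(
#       access_modes=["ReadWriteOnce"],
#       capacity={"storage": "10Gi"},
#       nfs=NFSVolumeSource(path="/exports/data", server="nfs.example.com"),
#       storage_class_name="manual",
#       persistent_volume_reclaim_policy="Retain",
#   )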
class LoadBalancerIngress(HelmYaml):
"""
:param hostname: Hostname is set for load-balancer ingress points that are DNS \
based (typically AWS load-balancers)
:param ip: IP is set for load-balancer ingress points that are IP based (typically \
GCE or OpenStack load-balancers)
"""
def __init__(self, hostname: str, ip: str):
self.hostname = hostname
self.ip = ip
class Taint(HelmYaml):
"""
:param effect: Required. The effect of the taint on pods that do not tolerate the \
taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
:param key: Required. The taint key to be applied to a node.
:param time_added: TimeAdded represents the time at which the taint was added. It \
is only written for NoExecute taints.
:param value: The taint value corresponding to the taint key.
"""
def __init__(
self,
effect: str,
key: str,
time_added: Optional[datetime] = None,
value: Optional[str] = None,
):
self.effect = effect
self.key = key
self.timeAdded = self._get_kube_date_string(time_added)
self.value = value
@staticmethod
def _get_kube_date_string(datetime_obj: Optional[datetime]):
return (
datetime_obj.strftime("%Y-%m-%dT%H:%M:%SZ%Z")
if datetime_obj
else datetime_obj
)
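# Illustrative sketch: a NoExecute taint with an explicit timestamp. A naive
# datetime renders via _get_kube_date_string as '2020-01-01T12:00:00Z' (the
# trailing %Z is empty when no timezone is attached).
#   t = Taint(effect="NoExecute", key="dedicated", value="gpu",
#             time_added=datetime(2020, 1, 1, 12, 0, 0))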
class ConfigMapNodeConfigSource(HelmYaml):
"""
:param name: Name is the metadata.name of the referenced ConfigMap. This field is \
required in all cases.
:param kubelet_config_key: KubeletConfigKey declares which key of the referenced \
ConfigMap corresponds to the KubeletConfiguration structure This field is \
required in all cases.
:param namespace: Namespace is the metadata.namespace of the referenced ConfigMap. \
This field is required in all cases.
:param resource_version: ResourceVersion is the metadata.ResourceVersion of the \
referenced ConfigMap. This field is forbidden in Node.Spec, and required in \
Node.Status.
:param uid: UID is the metadata.UID of the referenced ConfigMap. This field is \
forbidden in Node.Spec, and required in Node.Status.
"""
def __init__(
self,
name: str,
kubelet_config_key: str,
namespace: str,
resource_version: Optional[str] = None,
uid: Optional[str] = None,
):
self.name = name
self.kubeletConfigKey = kubelet_config_key
self.namespace = namespace
self.resourceVersion = resource_version
self.uid = uid
class EndpointPort(HelmYaml):
"""
:param name: The name of this port. This must match the 'name' field in the \
corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is \
defined.
:param app_protocol: The application protocol for this port. This field follows \
standard Kubernetes label syntax. Un-prefixed names are reserved for IANA \
standard service names (as per RFC-6335 and \
http://www.iana.org/assignments/service-names). Non-standard protocols should \
use prefixed names such as mycompany.com/my-custom-protocol. Field can be \
enabled with ServiceAppProtocol feature gate.
:param port: The port number of the endpoint.
:param protocol: The IP protocol for this port. Must be UDP, TCP, or SCTP. Default \
is TCP.
"""
def __init__(
self, name: str, app_protocol: str, port: int, protocol: Optional[str] = None
):
self.name = name
self.appProtocol = app_protocol
self.port = port
self.protocol = protocol
class EndpointAddress(HelmYaml):
"""
:param hostname: The Hostname of this endpoint
:param ip: The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local \
(169.254.0.0/16), or link-local multicast (224.0.0.0/24). IPv6 is also \
accepted but not fully supported on all platforms. Also, certain kubernetes \
components, like kube-proxy, are not IPv6 ready.
:param node_name: Optional: Node hosting this endpoint. This can be used to \
determine endpoints local to a node.
:param target_ref: Reference to object providing the endpoint.
"""
def __init__(
self,
hostname: str,
ip: str,
node_name: Optional[str] = None,
target_ref: Optional[ObjectReference] = None,
):
self.hostname = hostname
self.ip = ip
self.nodeName = node_name
self.targetRef = target_ref
class EndpointSubset(HelmYaml):
"""
:param addresses: IP addresses which offer the related ports that are marked as \
ready. These endpoints should be considered safe for load balancers and \
clients to utilize.
:param not_ready_addresses: IP addresses which offer the related ports but are not \
currently marked as ready because they have not yet finished starting, have \
recently failed a readiness check, or have recently failed a liveness check.
:param ports: Port numbers available on the related IP addresses.
"""
def __init__(
self,
addresses: List[EndpointAddress],
not_ready_addresses: Optional[List[EndpointAddress]] =
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_152(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_61(lexerbuf)
return result
def _sedlex_rnd_151(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_60(lexerbuf)
return result
def _sedlex_st_59(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_20(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_150[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_149(lexerbuf: lexbuf):
result = -1
result = 60
return result
def _sedlex_rnd_148(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_52(lexerbuf)
return result
def _sedlex_st_58(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_19(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_147[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_146(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_59(lexerbuf)
return result
def _sedlex_rnd_145(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_52(lexerbuf)
return result
def _sedlex_st_57(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_20(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_144[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_143(lexerbuf: lexbuf):
result = -1
result = 60
return result
def _sedlex_rnd_142(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_55(lexerbuf)
return result
def _sedlex_st_56(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_19(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_141[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_140(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_57(lexerbuf)
return result
def _sedlex_rnd_139(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_55(lexerbuf)
return result
def _sedlex_st_55(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_20(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_138[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_137(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_56(lexerbuf)
return result
def _sedlex_rnd_136(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_55(lexerbuf)
return result
def _sedlex_st_54(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 60)
state_id = _sedlex_decide_19(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_135[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_134(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_57(lexerbuf)
return result
def _sedlex_rnd_133(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_55(lexerbuf)
return result
def _sedlex_st_53(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_20(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_132[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_131(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_54(lexerbuf)
return result
def _sedlex_rnd_130(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_52(lexerbuf)
return result
def _sedlex_st_52(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_21(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_129[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_21(c: int):
if c <= -1:
return -1
else:
if c <= 93:
return _sedlex_DT_table_14[c - 0] - 1
else:
return 0
def _sedlex_rnd_128(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_58(lexerbuf)
return result
def _sedlex_rnd_127(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_53(lexerbuf)
return result
def _sedlex_rnd_126(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_52(lexerbuf)
return result
def _sedlex_st_50(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_20(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_125[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_20(c: int):
if c <= -1:
return -1
else:
if c <= 93:
return _sedlex_DT_table_13[c - 0] - 1
else:
return 0
def _sedlex_rnd_124(lexerbuf: lexbuf):
result = -1
result = 60
return result
def _sedlex_rnd_123(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_49(lexerbuf)
return result
def _sedlex_st_49(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_19(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_122[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_19(c: int):
if c <= -1:
return -1
else:
if c <= 61:
return _sedlex_DT_table_12[c - 0] - 1
else:
return 0
def _sedlex_rnd_121(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_50(lexerbuf)
return result
def _sedlex_rnd_120(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_49(lexerbuf)
return result
def _sedlex_st_48(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_18(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_119[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_18(c: int):
if c <= -1:
return -1
else:
if c <= 91:
return _sedlex_DT_table_11[c - 0] - 1
else:
return 0
def _sedlex_rnd_118(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_52(lexerbuf)
return result
def _sedlex_rnd_117(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_50(lexerbuf)
return result
def _sedlex_rnd_116(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_49(lexerbuf)
return result
def _sedlex_st_47(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 25)
state_id = _sedlex_decide_17(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_115[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_17(c: int):
if c <= 60:
return -1
else:
if c <= 91:
return _sedlex_DT_table_10[c - 61] - 1
else:
return -1
def _sedlex_rnd_114(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_60(lexerbuf)
return result
def _sedlex_rnd_113(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_48(lexerbuf)
return result
def _sedlex_st_46(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 57)
state_id = _sedlex_decide_16(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_112[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_111(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_46(lexerbuf)
return result
def _sedlex_st_45(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 57)
state_id = _sedlex_decide_16(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_110[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_16(c: int):
if c <= 47:
return -1
else:
if c <= 122:
return _sedlex_DT_table_9[c - 48] - 1
else:
return -1
def _sedlex_rnd_109(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_46(lexerbuf)
return result
def _sedlex_st_42(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 22)
state_id = _sedlex_decide_15(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_108[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_15(c: int):
if c <= 60:
return -1
else:
if c <= 62:
return _sedlex_DT_table_8[c - 61] - 1
else:
return -1
def _sedlex_rnd_107(lexerbuf: lexbuf):
result = -1
result = 24
return result
def _sedlex_rnd_106(lexerbuf: lexbuf):
result = -1
result = 23
return result
def _sedlex_st_40(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 20)
state_id = _sedlex_decide_14(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_105[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_14(c: int):
if c <= 60:
return -1
else:
if c <= 61:
return 0
else:
return -1
def _sedlex_rnd_104(lexerbuf: lexbuf):
result = -1
result = 21
return result
def _sedlex_st_37(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 17)
state_id = _sedlex_decide_13(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_103[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_13(c: int):
if c <= 59:
return -1
else:
if c <= 61:
return _sedlex_DT_table_8[c - 60] - 1
else:
return -1
def _sedlex_rnd_102(lexerbuf: lexbuf):
result = -1
result = 19
return result
def _sedlex_rnd_101(lexerbuf: lexbuf):
result = -1
result = 18
return result
def _sedlex_st_34(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 14)
state_id = _sedlex_decide_12(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_100[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_12(c: int):
if c <= 57:
return -1
else:
if c <= 58:
return 0
else:
return -1
def _sedlex_rnd_99(lexerbuf: lexbuf):
result = -1
result = 15
return result
def _sedlex_st_33(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 58)
state_id = _sedlex_decide_10(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_98[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_97(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_28(lexerbuf)
return result
def _sedlex_rnd_96(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_30(lexerbuf)
return result
def _sedlex_rnd_95(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_26(lexerbuf)
return result
def _sedlex_st_32(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 58)
state_id = _sedlex_decide_11(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_94[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_93(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_32(lexerbuf)
return result
def _sedlex_st_31(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_11(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_92[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_11(c: int):
if c <= 47:
return -1
else:
if c <= 122:
return _sedlex_DT_table_7[c - 48] - 1
else:
return -1
def _sedlex_rnd_91(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_32(lexerbuf)
return result
def _sedlex_st_30(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 58)
state_id = _sedlex_decide_10(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_90[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_10(c: int):
if c <= 45:
return -1
else:
if c <= 101:
return _sedlex_DT_table_6[c - 46] - 1
else:
return -1
def _sedlex_rnd_89(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_28(lexerbuf)
return result
def _sedlex_rnd_88(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_30(lexerbuf)
return result
def _sedlex_rnd_87(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_26(lexerbuf)
return result
def _sedlex_st_29(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 58)
state_id = _sedlex_decide_8(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_86[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_85(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_29(lexerbuf)
return result
def _sedlex_st_28(lexerbuf: lexbuf):
result = -1
state_id = _sedlex_decide_8(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_84[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_rnd_83(lexerbuf: lexbuf):
result = -1
result = _sedlex_st_29(lexerbuf)
return result
def _sedlex_st_27(lexerbuf: lexbuf):
result = -1
mark(lexerbuf, 58)
state_id = _sedlex_decide_9(public_next_int(lexerbuf))
if state_id >= 0:
result = _sedlex_rnd_82[state_id](lexerbuf)
else:
result = backtrack(lexerbuf)
return result
def _sedlex_decide_9(c: int):
if c <= 47:
return
""" The portalpy module for working with the ArcGIS Online and Portal APIs."""
from __future__ import absolute_import
import copy
import json
import imghdr
import logging
import os
import tempfile
from .connection import _ArcGISConnection, _normalize_url
from .connection import _is_http_url
from .connection import _parse_hostname, _unpack
from .common._utils import _to_utf8
from six.moves.urllib import request
from six.moves.urllib_parse import urlparse
__version__ = '1.7.0'
_log = logging.getLogger(__name__)
class Portal(object):
""" An object representing a connection to a single portal (via URL).
.. note:: To instantiate a Portal object execute code like this:
PortalPy.Portal(portalUrl, user, password)
There are a few things you should know as you use the methods below.
Group IDs - Many of the group functions require a group id. This id is
different than the group's name or title. To determine
a group id, use the search_groups function using the title
to get the group id.
Time - Many of the methods return a time field. All time is
returned as milliseconds since 1 January 1970. Python
expects time in seconds since 1 January 1970 so make sure
to divide times from PortalPy by 1000. See the example
a few lines down to see how to convert from PortalPy time
to Python time.
Example - converting time
.. code-block:: python
import time
.
.
.
group = portalAdmin.get_group('67e1761068b7453693a0c68c92a62e2e')
pythontime = time.ctime(group['created']/1000)
Example - list users in group
.. code-block:: python
portal = PortalPy.Portal(portalUrl, user, password)
resp = portal.get_group_members('67e1761068b7453693a0c68c92a62e2e')
for user in resp['users']:
print(user)
Example - create a group
.. code-block:: python
portal = PortalPy.Portal(portalUrl, user, password)
group_id = portal.create_group('my group', 'test tag', 'a group to share travel maps')
Example - delete a user named amy and assign her content to bob
.. code-block:: python
portal = PortalPy.Portal(portalUrl, user, password)
portal.delete_user('amy.user', True, 'bob.user')
"""
_is_arcpy = False
def __init__(self, url, username=None, password=None, key_file=None,
cert_file=None, expiration=60, referer=None, proxy_host=None,
proxy_port=None, connection=None, workdir=tempfile.gettempdir(),
tokenurl=None, verify_cert=True, client_id=None):
""" The Portal constructor. Requires URL and optionally username/password."""
url = url.strip() # be permissive in accepting home app urls
homepos = url.find('/home')
if homepos != -1:
url = url[:homepos]
self._is_arcpy = url.lower() == "pro"
if self._is_arcpy:
try:
import arcpy
url = arcpy.GetActivePortalURL()
self.url = url
except ImportError:
raise ImportError("Could not import arcpy")
except:
raise ValueError("Could not use Pro authentication.")
else:
self.url = url
if url:
normalized_url = self.url
'''_normalize_url(self.url)'''
if not normalized_url[-1] == '/':
normalized_url += '/'
if normalized_url.lower().find("www.arcgis.com") > -1:
urlscheme = urlparse(normalized_url).scheme
self.resturl = "{scheme}://www.arcgis.com/sharing/rest/".format(scheme=urlscheme)
elif normalized_url.lower().endswith("sharing/"):
self.resturl = normalized_url + 'rest/'
elif normalized_url.lower().endswith("sharing/rest/"):
self.resturl = normalized_url
else:
self.resturl = normalized_url + 'sharing/rest/'
self.hostname = _parse_hostname(url)
self.workdir = workdir
# Setup the instance members
self._basepostdata = { 'f': 'json' }
self._version = None
self._properties = None
self._resources = None
self._languages = None
self._regions = None
self._is_pre_162 = False
self._is_pre_21 = False
# If a connection was passed in, use it, otherwise setup the
# connection (use all SSL until portal informs us otherwise)
if connection:
_log.debug('Using existing connection to: ' + \
_parse_hostname(connection.baseurl))
self.con = connection
if not connection:
_log.debug('Connecting to portal: ' + self.hostname)
if self._is_arcpy:
self.con = _ArcGISConnection(baseurl="pro",
tokenurl=tokenurl,
username=username,
password=password,
key_file=key_file,
cert_file=cert_file,
expiration=expiration,
all_ssl=True,
referer=referer,
proxy_host=proxy_host,
proxy_port=proxy_port,
verify_cert=verify_cert)
else:
self.con = _ArcGISConnection(baseurl=self.resturl,
tokenurl=tokenurl,
username=username,
password=password,
key_file=key_file,
cert_file=cert_file,
expiration=expiration,
all_ssl=True,
referer=referer,
proxy_host=proxy_host,
proxy_port=proxy_port,
verify_cert=verify_cert,
client_id=client_id)
#self.get_version(True)
self.get_properties(True)
def add_group_users(self, user_names, group_id):
""" Adds users to the group specified.
.. note::
This method will only work if the user for the
Portal object is either an administrator for the entire
Portal or the owner of the group.
============ ======================================
**Argument** **Description**
------------ --------------------------------------
user_names list of usernames
------------ --------------------------------------
group_id required string, specifying group id
============ ======================================
:return:
A dictionary with a key of "not_added" which contains the users that were not
added to the group.
"""
if self._is_pre_21:
_log.warning('The auto_accept option is not supported in ' \
+ 'pre-2.0 portals')
return
#user_names = _unpack(user_names, 'username')
postdata = self._postdata()
postdata['users'] = ','.join(user_names)
resp = self.con.post('community/groups/' + group_id + '/addUsers',
postdata)
return resp
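# Usage sketch (illustrative): add two members to an existing group and inspect
# the response for any users that could not be added.
#   resp = portal.add_group_users(['amy.user', 'bob.user'],
#                                 '67e1761068b7453693a0c68c92a62e2e')
#   print(resp)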
def add_item(self, item_properties, data=None, thumbnail=None, metadata=None, owner=None, folder=None):
""" Adds content to a Portal.
.. note::
That content can be a file (such as a layer package, geoprocessing package,
map package) or it can be a URL (to an ArcGIS Server service, WMS service,
or an application).
If you are uploading a package or other file, provide a path or URL
to the file in the data argument.
From a technical perspective, none of the item properties below are required. However,
it is strongly recommended that title, type, typeKeywords, tags, snippet, and description
be provided.
============ ====================================================
**Argument** **Description**
------------ ----------------------------------------------------
item_properties required dictionary, see below for the keys and values
------------ ----------------------------------------------------
data optional string, either a path or URL to the data
------------ ----------------------------------------------------
thumbnail optional string, either a path or URL to an image
------------ ----------------------------------------------------
metadata optional string, either a path or URL to metadata.
------------ ----------------------------------------------------
owner optional string, defaults to logged in user.
------------ ----------------------------------------------------
folder optional string, content folder in which to place the item
============ ====================================================
================ ============================================================================
**Key** **Value**
---------------- ----------------------------------------------------------------------------
type optional string, indicates type of item. See URL 1 below for valid values.
---------------- ----------------------------------------------------------------------------
typeKeywords optional string list. Lists all sub-types. See URL 1 for valid values.
---------------- ----------------------------------------------------------------------------
description optional string. Description of the item.
---------------- ----------------------------------------------------------------------------
title optional string. Name of the item.
---------------- ----------------------------------------------------------------------------
url optional string. URL to item that are based on URLs.
---------------- ----------------------------------------------------------------------------
tags optional string of comma-separated values. Used for searches on items.
---------------- ----------------------------------------------------------------------------
snippet optional string. Provides a very short summary of the what the item is.
---------------- ----------------------------------------------------------------------------
extent optional string with comma separated values for min x, min y, max x, max y.
---------------- ----------------------------------------------------------------------------
spatialReference optional string. Coordinate system that the item is in.
---------------- ----------------------------------------------------------------------------
accessInformation optional string. Information on the source of the content.
---------------- ----------------------------------------------------------------------------
licenseInfo optional string, any license information or restrictions regarding the content.
---------------- ----------------------------------------------------------------------------
culture optional string. Locale, country and language information.
---------------- ----------------------------------------------------------------------------
access optional string. Valid values: private, shared, org, or public.
---------------- ----------------------------------------------------------------------------
commentsEnabled optional boolean. Default is true. Controls whether comments are allowed.
---------------- ----------------------------------------------------------------------------
culture optional string. Language and country information.
================ ============================================================================
URL 1: http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000ms000000
:return:
The item id of the uploaded item if successful, None if unsuccessful.
"""
# Postdata is a dictionary object whose keys and values will be sent via an HTTP Post.
postdata = self._postdata()
postdata.update(_to_utf8(item_properties))
# Build the files list (tuples)
files = []
if data:
if _is_http_url(data):
data = request.urlretrieve(data)[0]
else:
if not os.path.isfile(os.path.abspath(data)):
raise RuntimeError("File("+data+") not found.")
files.append(('file', data, os.path.basename(data)))
if metadata:
if _is_http_url(metadata):
metadata = request.urlretrieve(metadata)[0]
files.append(('metadata', metadata, 'metadata.xml'))
if thumbnail:
if _is_http_url(thumbnail):
thumbnail = request.urlretrieve(thumbnail)[0]
file_ext = os.path.splitext(thumbnail)[1]
if not file_ext:
file_ext = imghdr.what(thumbnail)
if file_ext in ('gif', 'png', 'jpeg'):
new_thumbnail = thumbnail + '.' + file_ext
os.rename(thumbnail, new_thumbnail)
thumbnail = new_thumbnail
files.append(('thumbnail', thumbnail, os.path.basename(thumbnail)))
# If owner isn't specified, use the logged in user
if not owner:
owner = self.logged_in_user()['username']
# Setup the item path, including the folder, and post to it
path = 'content/users/' + owner
if folder and folder != '/':
folder_id = self.get_folder_id(owner, folder)
path += '/' + folder_id
path += '/addItem'
resp = self.con.post(path, postdata, files)
if resp and resp.get('success'):
return resp['id']
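# Usage sketch (illustrative): publish a local CSV as a new item; the file path and
# property values below are placeholders.
#   item_props = {
#       'title': 'Traffic counts 2019',
#       'type': 'CSV',
#       'tags': 'traffic, counts',
#       'snippet': 'Annual traffic counts by intersection.',
#   }
#   item_id = portal.add_item(item_props, data='counts_2019.csv')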
def publish_item(self, itemid, data=None, text=None, fileType="serviceDefinition", publishParameters=None,
outputType=None, overwrite=False, owner=None, folder=None, buildInitialCache=False):
"""
Publishes a hosted service based on an existing source item.
Publishers can create feature services as well as tiled map services.
Feature services can be created using input files of type csv, shapefile, serviceDefinition, featureCollection, and fileGeodatabase.
CSV files that contain location fields, (ie.address fields or X, Y fields) are spatially enabled during the process of publishing.
Shapefiles and file geodatabases should be packaged as *.zip files.
Tiled map services can be created from service definition (*.sd) files, tile packages, and existing feature services.
Service definitions are authored in ArcGIS for Desktop and contain both the cartographic definition
Description: Query OS by current cloud
Summary: Query OS by current cloud
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindosbycurrentcloudResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findosbycurrentcloud.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findosbycurrentcloud_ex_async(
self,
request: deps_models.QueryBuildpackFindosbycurrentcloudRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindosbycurrentcloudResponse:
"""
Description: Query OS by current cloud
Summary: Query OS by current cloud
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindosbycurrentcloudResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findosbycurrentcloud.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_findbyappsv(
self,
request: deps_models.QueryBuildpackFindbyappsvRequest,
) -> deps_models.QueryBuildpackFindbyappsvResponse:
"""
Description: Query buildpack by app v1
Summary: Query buildpack
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findbyappsv_ex(request, headers, runtime)
async def query_buildpack_findbyappsv_async(
self,
request: deps_models.QueryBuildpackFindbyappsvRequest,
) -> deps_models.QueryBuildpackFindbyappsvResponse:
"""
Description: Query buildpack by appv1
Summary: Query buildpack
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findbyappsv_ex_async(request, headers, runtime)
def query_buildpack_findbyappsv_ex(
self,
request: deps_models.QueryBuildpackFindbyappsvRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappsvResponse:
"""
Description: Query buildpack by appv1
Summary: Query buildpack
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappsvResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findbyappsv.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findbyappsv_ex_async(
self,
request: deps_models.QueryBuildpackFindbyappsvRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappsvResponse:
"""
Description: Query buildpack by appv1
Summary: Query buildpack
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappsvResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbyappsv.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
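# Calling convention sketch (the client constructor and the empty request below are
# assumptions, not shown in this excerpt): each resource exposes four variants --
# sync, async, *_ex and *_ex_async. The plain variants simply build default
# RuntimeOptions and empty headers and delegate to the *_ex variants, which validate
# the request model and POST it to /gateway.do under the operation name shown above.
#
#   client = Client(config)  # hypothetical Tea-style client construction
#   req = deps_models.QueryBuildpackFindbyappsvRequest()
#   resp = client.query_buildpack_findbyappsv(req)
#   # explicit form with custom headers and runtime options:
#   resp = client.query_buildpack_findbyappsv_ex(
#       req, {'x-trace-id': 'demo'}, util_models.RuntimeOptions())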
def query_buildpack_findbyapps(
self,
request: deps_models.QueryBuildpackFindbyappsRequest,
) -> deps_models.QueryBuildpackFindbyappsResponse:
"""
Description: Query by app
Summary: Query by app
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findbyapps_ex(request, headers, runtime)
async def query_buildpack_findbyapps_async(
self,
request: deps_models.QueryBuildpackFindbyappsRequest,
) -> deps_models.QueryBuildpackFindbyappsResponse:
"""
Description: Query by app
Summary: Query by app
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findbyapps_ex_async(request, headers, runtime)
def query_buildpack_findbyapps_ex(
self,
request: deps_models.QueryBuildpackFindbyappsRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappsResponse:
"""
Description: Query by app
Summary: Query by app
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappsResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findbyapps.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findbyapps_ex_async(
self,
request: deps_models.QueryBuildpackFindbyappsRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappsResponse:
"""
Description: Query by app
Summary: Query by app
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappsResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbyapps.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_findbyappservices(
self,
request: deps_models.QueryBuildpackFindbyappservicesRequest,
) -> deps_models.QueryBuildpackFindbyappservicesResponse:
"""
Description: Query by app service
Summary: Query by app service
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findbyappservices_ex(request, headers, runtime)
async def query_buildpack_findbyappservices_async(
self,
request: deps_models.QueryBuildpackFindbyappservicesRequest,
) -> deps_models.QueryBuildpackFindbyappservicesResponse:
"""
Description: Query by app service
Summary: Query by app service
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findbyappservices_ex_async(request, headers, runtime)
def query_buildpack_findbyappservices_ex(
self,
request: deps_models.QueryBuildpackFindbyappservicesRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappservicesResponse:
"""
Description: Query by app service
Summary: Query by app service
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappservicesResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findbyappservices.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findbyappservices_ex_async(
self,
request: deps_models.QueryBuildpackFindbyappservicesRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappservicesResponse:
"""
Description: Query by app service
Summary: Query by app service
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappservicesResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbyappservices.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_findbyappv(
self,
request: deps_models.QueryBuildpackFindbyappvRequest,
) -> deps_models.QueryBuildpackFindbyappvResponse:
"""
Description: Query by appv1
Summary: Query by appv1
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findbyappv_ex(request, headers, runtime)
async def query_buildpack_findbyappv_async(
self,
request: deps_models.QueryBuildpackFindbyappvRequest,
) -> deps_models.QueryBuildpackFindbyappvResponse:
"""
Description: Query by appv1
Summary: Query by appv1
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findbyappv_ex_async(request, headers, runtime)
def query_buildpack_findbyappv_ex(
self,
request: deps_models.QueryBuildpackFindbyappvRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappvResponse:
"""
Description: Query by appv1
Summary: Query by appv1
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappvResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findbyappv.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findbyappv_ex_async(
self,
request: deps_models.QueryBuildpackFindbyappvRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappvResponse:
"""
Description: Query by appv1
Summary: Query by appv1
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappvResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbyappv.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_findbyapp(
self,
request: deps_models.QueryBuildpackFindbyappRequest,
) -> deps_models.QueryBuildpackFindbyappResponse:
"""
Description: Query by app
Summary: Query by app
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findbyapp_ex(request, headers, runtime)
async def query_buildpack_findbyapp_async(
self,
request: deps_models.QueryBuildpackFindbyappRequest,
) -> deps_models.QueryBuildpackFindbyappResponse:
"""
Description: Query by app
Summary: Query by app
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findbyapp_ex_async(request, headers, runtime)
def query_buildpack_findbyapp_ex(
self,
request: deps_models.QueryBuildpackFindbyappRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappResponse:
"""
Description: Query by app
Summary: Query by app
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findbyapp.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findbyapp_ex_async(
self,
request: deps_models.QueryBuildpackFindbyappRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindbyappResponse:
"""
Description: Query by app
Summary: Query by app
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindbyappResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findbyapp.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def create_buildpack_generatesignurl(
self,
request: deps_models.CreateBuildpackGeneratesignurlRequest,
) -> deps_models.CreateBuildpackGeneratesignurlResponse:
"""
Description: Generate URL
Summary: Generate URL
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.create_buildpack_generatesignurl_ex(request, headers, runtime)
async def create_buildpack_generatesignurl_async(
self,
request: deps_models.CreateBuildpackGeneratesignurlRequest,
) -> deps_models.CreateBuildpackGeneratesignurlResponse:
"""
Description: Generate URL
Summary: Generate URL
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.create_buildpack_generatesignurl_ex_async(request, headers, runtime)
def create_buildpack_generatesignurl_ex(
self,
request: deps_models.CreateBuildpackGeneratesignurlRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.CreateBuildpackGeneratesignurlResponse:
"""
Description: Generate URL
Summary: Generate URL
"""
UtilClient.validate_model(request)
return deps_models.CreateBuildpackGeneratesignurlResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.generatesignurl.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def create_buildpack_generatesignurl_ex_async(
self,
request: deps_models.CreateBuildpackGeneratesignurlRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.CreateBuildpackGeneratesignurlResponse:
"""
Description: Generate URL
Summary: Generate URL
"""
UtilClient.validate_model(request)
return deps_models.CreateBuildpackGeneratesignurlResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.generatesignurl.create', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_sumpackagessize(
self,
request: deps_models.QueryBuildpackSumpackagessizeRequest,
) -> deps_models.QueryBuildpackSumpackagessizeResponse:
"""
Description: Query packages size
Summary: Query packages size
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_sumpackagessize_ex(request, headers, runtime)
async def query_buildpack_sumpackagessize_async(
self,
request: deps_models.QueryBuildpackSumpackagessizeRequest,
) -> deps_models.QueryBuildpackSumpackagessizeResponse:
"""
Description: Query packages size
Summary: Query packages size
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_sumpackagessize_ex_async(request, headers, runtime)
def query_buildpack_sumpackagessize_ex(
self,
request: deps_models.QueryBuildpackSumpackagessizeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackSumpackagessizeResponse:
"""
Description: Query packages size
Summary: Query packages size
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackSumpackagessizeResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.sumpackagessize.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_sumpackagessize_ex_async(
self,
request: deps_models.QueryBuildpackSumpackagessizeRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackSumpackagessizeResponse:
"""
Description: Query packages size
Summary: Query packages size
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackSumpackagessizeResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.sumpackagessize.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_supportcoderepo(
self,
request: deps_models.QueryBuildpackSupportcoderepoRequest,
) -> deps_models.QueryBuildpackSupportcoderepoResponse:
"""
Description: Query whether the code repo is supported
Summary: Query whether the code repo is supported
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_supportcoderepo_ex(request, headers, runtime)
async def query_buildpack_supportcoderepo_async(
self,
request: deps_models.QueryBuildpackSupportcoderepoRequest,
) -> deps_models.QueryBuildpackSupportcoderepoResponse:
"""
Description: Query whether the code repo is supported
Summary: Query whether the code repo is supported
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_supportcoderepo_ex_async(request, headers, runtime)
def query_buildpack_supportcoderepo_ex(
self,
request: deps_models.QueryBuildpackSupportcoderepoRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackSupportcoderepoResponse:
"""
Description: Query whether the code repo is supported
Summary: Query whether the code repo is supported
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackSupportcoderepoResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.supportcoderepo.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_supportcoderepo_ex_async(
self,
request: deps_models.QueryBuildpackSupportcoderepoRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackSupportcoderepoResponse:
"""
Description: Query whether the code repo is supported
Summary: Query whether the code repo is supported
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackSupportcoderepoResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.supportcoderepo.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpack_findavailablebyappserviceids(
self,
request: deps_models.QueryBuildpackFindavailablebyappserviceidsRequest,
) -> deps_models.QueryBuildpackFindavailablebyappserviceidsResponse:
"""
Description: Query Composite info by available app service IDs
Summary: Query Composite info
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpack_findavailablebyappserviceids_ex(request, headers, runtime)
async def query_buildpack_findavailablebyappserviceids_async(
self,
request: deps_models.QueryBuildpackFindavailablebyappserviceidsRequest,
) -> deps_models.QueryBuildpackFindavailablebyappserviceidsResponse:
"""
Description: Query Composite info by available app service IDs
Summary: Query Composite info
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpack_findavailablebyappserviceids_ex_async(request, headers, runtime)
def query_buildpack_findavailablebyappserviceids_ex(
self,
request: deps_models.QueryBuildpackFindavailablebyappserviceidsRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindavailablebyappserviceidsResponse:
"""
Description: Query Composite info by available app service IDs
Summary: Query Composite info
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindavailablebyappserviceidsResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpack.findavailablebyappserviceids.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpack_findavailablebyappserviceids_ex_async(
self,
request: deps_models.QueryBuildpackFindavailablebyappserviceidsRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpackFindavailablebyappserviceidsResponse:
"""
Description: Query Composite info by available app service IDs
Summary: Query Composite info
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpackFindavailablebyappserviceidsResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpack.findavailablebyappserviceids.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def query_buildpacknew(
self,
request: deps_models.QueryBuildpacknewRequest,
) -> deps_models.QueryBuildpacknewResponse:
"""
Description: pageQuery
Summary: pageQuery
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.query_buildpacknew_ex(request, headers, runtime)
async def query_buildpacknew_async(
self,
request: deps_models.QueryBuildpacknewRequest,
) -> deps_models.QueryBuildpacknewResponse:
"""
Description: pageQuery
Summary: pageQuery
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.query_buildpacknew_ex_async(request, headers, runtime)
def query_buildpacknew_ex(
self,
request: deps_models.QueryBuildpacknewRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpacknewResponse:
"""
Description: pageQuery
Summary: pageQuery
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpacknewResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpacknew.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def query_buildpacknew_ex_async(
self,
request: deps_models.QueryBuildpacknewRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.QueryBuildpacknewResponse:
"""
Description: pageQuery
Summary: pageQuery
"""
UtilClient.validate_model(request)
return deps_models.QueryBuildpacknewResponse().from_map(
await self.do_request_async('1.0', 'antcloud.deps.buildpacknew.query', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
def get_buildpacknew(
self,
request: deps_models.GetBuildpacknewRequest,
) -> deps_models.GetBuildpacknewResponse:
"""
Description: buildpacknewget
Summary: buildpacknewget
"""
runtime = util_models.RuntimeOptions()
headers = {}
return self.get_buildpacknew_ex(request, headers, runtime)
async def get_buildpacknew_async(
self,
request: deps_models.GetBuildpacknewRequest,
) -> deps_models.GetBuildpacknewResponse:
"""
Description: buildpacknewget
Summary: buildpacknewget
"""
runtime = util_models.RuntimeOptions()
headers = {}
return await self.get_buildpacknew_ex_async(request, headers, runtime)
def get_buildpacknew_ex(
self,
request: deps_models.GetBuildpacknewRequest,
headers: Dict[str, str],
runtime: util_models.RuntimeOptions,
) -> deps_models.GetBuildpacknewResponse:
"""
Description: buildpacknewget
Summary: buildpacknewget
"""
UtilClient.validate_model(request)
return deps_models.GetBuildpacknewResponse().from_map(
self.do_request('1.0', 'antcloud.deps.buildpacknew.get', 'HTTPS', 'POST', f'/gateway.do', TeaCore.to_map(request), headers, runtime)
)
async def get_buildpacknew_ex_async(
self,
request: deps_models.GetBuildpacknewRequest,
headers: Dict[str, str],
| |
osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(authorization_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
return objects.AuthorizationList(result, runtime=self._runtime, proxy=self._proxy)
@utilities.arguments_not_none
def get_authorizations_by_parent_genus_type(self, authorization_genus_type):
"""Gets an ``AuthorizationList`` corresponding to the given authorization genus ``Type`` and include authorizations of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_genus_type (osid.type.Type): an
authorization genus type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type
# STILL NEED TO IMPLEMENT!!!
return objects.AuthorizationList([])
@utilities.arguments_not_none
def get_authorizations_by_record_type(self, authorization_record_type):
"""Gets an ``AuthorizationList`` containing the given authorization record ``Type``.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: authorization_record_type (osid.type.Type): an
authorization record type
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: NullArgument - ``authorization_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_record_type
# STILL NEED TO IMPLEMENT!!!
return objects.AuthorizationList([])
@utilities.arguments_not_none
def get_authorizations_on_date(self, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_on_date
authorization_list = []
for authorization in self.get_authorizations():
if overlap(from_, to, authorization.start_date, authorization.end_date):
authorization_list.append(authorization)
return objects.AuthorizationList(authorization_list, runtime=self._runtime)
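# `overlap` is imported from this package's utilities; a minimal sketch of the
# predicate it implements (two closed date ranges share at least one instant),
# shown only for clarity -- the real helper may differ in signature and edge cases:
#
#   def overlap(from_, to, start, end):
#       return start <= to and end >= from_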
@utilities.arguments_not_none
def get_authorizations_for_resource(self, resource_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_resource_on_date(self, resource_id, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
In effective mode, authorizations are returned that are
currently effective. In any effective mode, active
authorizations and those currently expired are returned.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``resource_id, from`` or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent(self, agent_id):
"""Gets a list of ``Authorizations`` associated with a given agent.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: agent_id (osid.id.Id): an agent ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``agent_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent_on_date(self, agent_id, from_, to):
"""Gets an ``AuthorizationList`` for the given agent and effective during the entire given date range inclusive but not confined to the date range.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``agent_id, from`` or ``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_function(self, function_id):
"""Gets a list of ``Authorizations`` associated with a given function.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``function_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.learning.ActivityLookupSession.get_activities_for_objective_template
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'functionId': str(function_id)},
**self._view_filter()))
return objects.AuthorizationList(result, runtime=self._runtime)
@utilities.arguments_not_none
def get_authorizations_for_function_on_date(self, function_id, from_, to):
"""Gets an ``AuthorizationList`` for the given function and effective during the entire given date range inclusive but not confined to the date range.
arg: function_id (osid.id.Id): a function ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``function_id, from`` or ``to`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_resource_and_function(self, resource_id, function_id):
"""Gets a list of ``Authorizations`` associated with a given resource.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned. In plenary mode, the
returned list contains all known authorizations or an error
results. Otherwise, the returned list may contain only those
authorizations that are accessible through this session.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
return: (osid.authorization.AuthorizationList) - the returned
``Authorization list``
raise: NullArgument - ``resource_id`` or ``function_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.relationship.RelationshipLookupSession.get_relationships_for_peers
# NOTE: This implementation currently ignores plenary and effective views
collection = JSONClientValidated('authorization',
collection='Authorization',
runtime=self._runtime)
result = collection.find(
dict({'sourceId': str(resource_id),
'destinationId': str(function_id)},
**self._view_filter())).sort('_id', ASCENDING)
return objects.AuthorizationList(result, runtime=self._runtime)
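# Usage sketch (the session object and the two Ids are assumed to exist; their
# construction is elided). The lookup above filters on sourceId/destinationId:
#
#   authorizations = session.get_authorizations_for_resource_and_function(
#       resource_id, function_id)
#   for authz in authorizations:
#       print(authz.display_name.text)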
@utilities.arguments_not_none
def get_authorizations_for_resource_and_function_on_date(self, resource_id, function_id, from_, to):
"""Gets an ``AuthorizationList`` effective during the entire given date range inclusive but not confined to the date range.
Authorizations related to the given resource, including those
related through an ``Agent,`` are returned.
In plenary mode, the returned list contains all known
authorizations or an error results. Otherwise, the returned list
may contain only those authorizations that are accessible
through this session.
In effective mode, authorizations are returned that are
currently effective. In any effective mode, active
authorizations and those currently expired are returned.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: function_id (osid.id.Id): a function ``Id``
arg: from (osid.calendaring.DateTime): starting date
arg: to (osid.calendaring.DateTime): ending date
return: (osid.authorization.AuthorizationList) - the returned
``Authorization`` list
raise: InvalidArgument - ``from`` is greater than ``to``
raise: NullArgument - ``resource_id, function_id, from`` or
``to`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def get_authorizations_for_agent_and_function(self, agent_id, function_id):
"""Gets a list of ``Authorizations`` | |
<MappingProjection>` between them, in which supervised learning is used to modify the `matrix
<MappingProjection.matrix>` parameter of the `MappingProjections <MappingProjection>` in the sequence, so that the
input to the first ProcessingMechanism in the sequence generates an output from the last ProcessingMechanism that
matches as closely as possible a target value `specified as input <Composition_Target_Inputs>` in the Composition's
`learn <Composition.learn>` method. The Mechanisms in the pathway must be compatible with learning (that is, their
`function <Mechanism_Base.function>` must be compatible with the `function <LearningMechanism.function>` of the
`LearningMechanism` for the MappingProjections they receive (see `LearningMechanism_Function`). The Composition's
`learning methods <Composition_Learning_Methods>` return a learning `Pathway`, in which its `learning_components
<Pathway.learning_components>` attribute is assigned a dict containing the set of learning components generated for
the Pathway, as described below.
.. _Composition_Learning_Components:
*Supervised Learning Components*
================================
For each `learning pathway <Composition_Learning_Pathway>` specified in the **pathways** argument of a Composition's
constructor or one of its `learning methods <Composition_Learning_Methods>`, it creates the following Components,
and assigns to them the `NodeRoles <NodeRole>` indicated:
.. _TARGET_MECHANISM:
* *TARGET_MECHANISM* -- receives the desired `value <Mechanism_Base.value>` for the `OUTPUT_MECHANISM`, that is
used by the *OBJECTIVE_MECHANISM* as the target in computing the error signal (see above); that value must be
specified as an input to the TARGET_MECHANISM, either in the **inputs** argument of the Composition's `learn
<Composition.learn>` method, or in its **targets** argument in an entry for either the *TARGET_MECHANISM* or
the `OUTPUT_MECHANISM <OUTPUT_MECHANISM>` (see `below <Composition_Target_Inputs>`); the Mechanism is assigned
the `NodeRoles <NodeRole>` `TARGET` and `LEARNING` in the Composition.
..
* a MappingProjection that projects from the *TARGET_MECHANISM* to the *TARGET* `InputPort
<ComparatorMechanism_Structure>` of the *OBJECTIVE_MECHANISM*.
..
* a MappingProjection that projects from the last ProcessingMechanism in the learning Pathway to the *SAMPLE*
`InputPort <ComparatorMechanism_Structure>` of the *OBJECTIVE_MECHANISM*.
..
.. _OBJECTIVE_MECHANISM:
* *OBJECTIVE_MECHANISM* -- usually a `ComparatorMechanism`, used to `calculate an error signal
<ComparatorMechanism_Execution>` for the sequence by comparing the value received by the ComparatorMechanism's
*SAMPLE* `InputPort <ComparatorMechanism_Structure>` (from the `output <LearningMechanism_Activation_Output>` of
the last Processing Mechanism in the `learning Pathway <Composition_Learning_Pathway>`) with the value received
in the *OBJECTIVE_MECHANISM*'s *TARGET* `InputPort <ComparatorMechanism_Structure>` (from the *TARGET_MECHANISM*
generated by the method -- see below); this is assigned the `NodeRole` `LEARNING` in the Composition.
..
.. _LEARNING_MECHANISMS:
* *LEARNING_MECHANISMS* -- a `LearningMechanism` for each MappingProjection in the sequence, each of which
calculates the `learning_signal <LearningMechanism.learning_signal>` used to modify the `matrix
<MappingProjection.matrix>` parameter for the corresponding MappingProjection, along with a `LearningSignal` and
`LearningProjection` that convey the `learning_signal <LearningMechanism.learning_signal>` to the
MappingProjection's *MATRIX* `ParameterPort<Mapping_Matrix_ParameterPort>`; depending on learning method,
additional MappingProjections may be created to and/or from the LearningMechanism -- see
`LearningMechanism_Learning_Configurations` for details); these are assigned the `NodeRole` `LEARNING` in the
Composition.
..
.. _LEARNING_FUNCTION:
* *LEARNING_FUNCTION* -- the `LearningFunction` used by each of the `LEARNING_MECHANISMS` in the learning pathway.
..
.. _LEARNED_PROJECTIONS:
* *LEARNED_PROJECTIONS* -- a `LearningProjection` from each `LearningMechanism` to the `MappingProjection`
for which it modifies its `matrix <MappingProjection.matrix>` parameter.
It also assigns the following item to the list of `learning_components` for the pathway:
.. _OUTPUT_MECHANISM:
* *OUTPUT_MECHANISM* -- the final `Node <Component_Nodes>` in the learning Pathway, the target `value
<Mechanism_Base.value>` for which is specified as input to the `TARGET_MECHANISM`; the Node is assigned
the `NodeRoles <NodeRole>` `OUTPUT` in the Composition.
The items with names listed above are placed in a dict that is assigned to the `learning_components
<Pathway.learning_components>` attribute of the `Pathway` returned by the learning method used to create the `Pathway`;
the key for each item in the dict is the name of the item (as listed above), and the object(s) created of that type
are its value (see `LearningMechanism_Single_Layer_Learning` for a more detailed description and figure showing these
Components).
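For example, a call to `add_backpropagation_learning_pathway <Composition.add_backpropagation_learning_pathway>`
returns a Pathway whose learning components can be retrieved by name (a schematic sketch -- Mechanism sizes and other
constructor arguments are elided, and the key constants are assumed to match the names listed above)::

    import psyneulink as pnl
    A = pnl.TransferMechanism(name='A')
    B = pnl.TransferMechanism(name='B')
    comp = pnl.Composition()
    learning_pathway = comp.add_backpropagation_learning_pathway(pathway=[A, B])
    target = learning_pathway.learning_components[pnl.TARGET_MECHANISM]
    comparator = learning_pathway.learning_components[pnl.OBJECTIVE_MECHANISM]
    learning_mechs = learning_pathway.learning_components[pnl.LEARNING_MECHANISMS]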
If the `learning Pathway <Composition_Learning_Pathway>` involves more than two ProcessingMechanisms (e.g., using
`add_backpropagation_learning_pathway` for a multilayered neural network), then multiple LearningMechanisms are
created, along with MappingProjections that provide them with the `error_signal <LearningMechanism.error_signal>`
from the preceding LearningMechanism, and `LearningProjections <LearningProjection>` that modify the corresponding
MappingProjections (*LEARNED_PROJECTION*\\s) in the `learning Pathway <Composition_Learning_Pathway>`, as shown for
an example in the figure below. These additional learning components are listed in the *LEARNING_MECHANISMS* and
*LEARNED_PROJECTIONS* entries of the dictionary assigned to the `learning_components <Pathway.learning_components>`
attribute of the `learning Pathway <Composition_Learning_Pathway>` returned by the learning method.
.. _Composition_MultilayerLearning_Figure:
**Figure: Supervised Learning Components**
.. figure:: _static/Composition_Multilayer_Learning_fig.svg
:alt: Schematic of LearningMechanism and LearningProjections in a Process
:scale: 50 %
*Components for supervised learning Pathway*: the Pathway has three Mechanisms generated by a call to a `supervised
learning method <Composition_Learning_Methods>` (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B,C])``),
with `NodeRole` assigned to each `Node <Composition_Nodes>` in the Composition's `graph <Composition.graph>` (in
italics below Mechanism type) and the names of the learning components returned by the learning method (capitalized
and in italics, above each Mechanism).
The description above (and the `example <Composition_Examples_Learning_XOR>` below) pertains to simple linear sequences.
However, more complex configurations, with convergent, divergent and/or intersecting sequences can be built using
multiple calls to the learning method (see `example <BasicsAndPrimer_Rumelhart_Model>` in `BasicsAndPrimer`). In
each case, the learning method determines how the sequence to be added relates to any existing ones with which it abuts or
intersects, and automatically creates and configures the relevant learning components so that the error terms are
properly computed and propagated by each LearningMechanism to the next in the configuration. It is important to note
that, in doing so, the status of a Mechanism in the final configuration takes precedence over its status in any of
the individual sequences specified in the `learning methods <Composition_Learning_Methods>` when building the
Composition. In particular, whereas ordinarily the last ProcessingMechanism of a sequence specified in a learning
method projects to an *OBJECTIVE_MECHANISM*, this may be superseded if multiple sequences are created. This is the
case if: i) the Mechanism is in a sequence that is contiguous (i.e., abuts or intersects) with others already in the
Composition, ii) the Mechanism appears in any of those other sequences, and iii) it is not the last Mechanism in
*all* of them; in that case, it will not project to an *OBJECTIVE_MECHANISM* (see `figure below
<Composition_Learning_Output_vs_Terminal_Figure>` for an example). Furthermore, if it *is* the last Mechanism in all
of them (that is, all of the specified pathways converge on that Mechanism), only one *OBJECTIVE_MECHANISM* is created
for that Mechanism (i.e., not one for each sequence). Finally, it should be noted that, by default, learning components
are *not* assigned the `NodeRole` of `OUTPUT` even though they may be the `TERMINAL` Mechanism of a Composition;
conversely, even though the last Mechanism of a `learning Pathway <Composition_Learning_Pathway>` projects to an
*OBJECTIVE_MECHANISM*, and thus is not the `TERMINAL` `Node <Composition_Nodes>` of a Composition, if it does not
project to any other Mechanisms in the Composition it is nevertheless assigned as an `OUTPUT` of the Composition. That
is, Mechanisms that would otherwise have been the `TERMINAL` Mechanism of a Composition preserve their role as an
`OUTPUT` Node of the Composition if they are part of a `learning Pathway <Composition_Learning_Pathway>` even though
they project to another Mechanism (the *OBJECTIVE_MECHANISM*) in the Composition.
.. _Composition_Learning_Output_vs_Terminal_Figure:
**OUTPUT** vs. **TERMINAL** Roles in Learning Configuration
.. figure:: _static/Composition_Learning_OUTPUT_vs_TERMINAL_fig.svg
:alt: Schematic of Mechanisms and Projections involved in learning
:scale: 50 %
Configuration of Components generated by the creation of two intersecting `learning Pathways
<Composition_Learning_Pathway>` (e.g., ``add_backpropagation_learning_pathway(pathway=[A,B])`` and
``add_backpropagation_learning_pathway(pathway=[D,B,C])``). Mechanism B is the last Mechanism of the sequence
specified for the first pathway, and so would project to a `ComparatorMechanism`, and would be assigned as an
`OUTPUT` `Node <Composition_Nodes>` of the Composition, if that pathway was created on its own. However, since
Mechanism B is also in the middle of the sequence specified for the second pathway, it does not project to a
ComparatorMechanism, and is relegated to being an `INTERNAL` Node of the Composition. Mechanism C is now the
one that projects to the ComparatorMechanism and is assigned as the `OUTPUT` Node.
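The configuration shown in the figure can be reproduced with two calls to the learning method (a sketch; constructor
arguments are elided and the role queries are illustrative)::

    import psyneulink as pnl
    A, B, C, D = [pnl.TransferMechanism(name=n) for n in 'ABCD']
    comp = pnl.Composition()
    comp.add_backpropagation_learning_pathway(pathway=[A, B])
    comp.add_backpropagation_learning_pathway(pathway=[D, B, C])
    # B is INTERNAL (it no longer projects to a ComparatorMechanism);
    # C now projects to the ComparatorMechanism and is an OUTPUT Node
    assert B in comp.get_nodes_by_role(pnl.NodeRole.INTERNAL)
    assert C in comp.get_nodes_by_role(pnl.NodeRole.OUTPUT)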
.. _Composition_Learning_Execution:
*Execution of Learning*
=======================
For learning to occur when a Composition is run, its `learn <Composition.learn>` method must be used instead of the
`run <Composition.run>` method, and its `disable_learning <Composition.disable_learning>` attribute must be False.
When the `learn <Composition.learn>` method is used, all Components *unrelated* to learning are executed in the same
way as with the `run <Composition.run>` method. If the Composition has any `nested Composition
|_ __ _ __ _ ___ ___
# \___ \| __/ _` |/ _` |/ _ / __|
# ___) | || (_| | (_| | __\__ \
# |____/ \__\__,_|\__, |\___|___/
# |___/
#
################################################################################################
@app.route('/workflow/stage', cors=True, methods=['POST'], authorizer=authorizer)
def create_stage_api():
""" Create a stage state machine from a list of existing operations.
A stage is a set of operations that are grouped so they can be executed in parallel.
When the stage is executed as part of a workflow, operations within a stage are executed as
branches in a parallel Step Functions state. The generated state machine's status is tracked by the
workflow engine control plane during execution.
An optional Configuration for each operator in the stage can be input to override the
default configuration for the stage.
Body:
.. code-block:: python
{
"Name":"stage-name",
"Operations": ["operation-name1", "operation-name2", ...]
}
Returns:
A dict mapping keys to the corresponding stage created including
the ARN of the state machine created.
{
"Name": string,
"Operations": [
"operation-name1",
"operation-name2",
...
],
"Configuration": {
"operation-name1": operation-configuration-object1,
"operation-name2": operation-configuration-object1,
...
}
},
{
"Name": "TestStage",
"Operations": [
"TestOperator"
],
"Configuration": {
"TestOperator": {
"MediaType": "Video",
"Enabled": true
}
}
}
Raises:
200: The stage was created successfully.
400: Bad Request - one of the input state machines was not found or was invalid
409: Conflict
500: ChaliceViewError - internal server error
"""
stage = None
stage = json.loads(app.current_request.raw_body.decode())
logger.info(json.loads(app.current_request.raw_body.decode()))
stage = create_stage(stage)
return stage
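# Example client call for this route (a sketch -- the API endpoint, stage body and
# auth token are placeholders; the listed operations must already exist):
#
#   import requests
#   body = {"Name": "TestStage", "Operations": ["TestOperator"]}
#   resp = requests.post(
#       "https://<api-id>.execute-api.<region>.amazonaws.com/api/workflow/stage",
#       json=body, headers={"Authorization": "<token>"})
#   resp.json()["Configuration"]  # per-operator default configuration for the stage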
def create_stage(stage):
try:
stage_table = DYNAMO_RESOURCE.Table(STAGE_TABLE_NAME)
Configuration = {}
logger.info(stage)
validate(instance=stage, schema=SCHEMA["create_stage_request"])
logger.info("Stage schema is valid")
Name = stage["Name"]
# Check if this stage already exists
response = stage_table.get_item(
Key={
'Name': Name
},
ConsistentRead=True)
if "Item" in response:
raise ConflictError(
"A stage with the name '%s' already exists" % Name)
# Build the stage state machine. The stage machine consists of a parallel state with
# branches for each operator and a call to the stage completion lambda at the end.
# The parallel state takes a stage object as input. Each
# operator returns and operatorOutput object. The outputs for each operator are
# returned from the parallel state as elements of the "outputs" array.
stageAsl = {
"StartAt": "Preprocess Media",
"States": {
"Complete Stage {}".format(Name): {
"Type": "Task",
# FIXME - testing NoQ workflows
#"Resource": COMPLETE_STAGE_LAMBDA_ARN,
"Resource": COMPLETE_STAGE_LAMBDA_ARN,
"End": True
}
}
}
stageAsl["StartAt"] = Name
stageAsl["States"][Name] = {
"Type": "Parallel",
"Next": "Complete Stage {}".format(Name),
"ResultPath": "$.Outputs",
"Branches": [
],
"Catch": [
{
"ErrorEquals": ["States.ALL"],
"Next": "Complete Stage {}".format(Name),
"ResultPath": "$.Outputs"
}
]
}
# Add a branch to the stage state machine for each operation, build up default
# Configuration for the stage based on the operator Configuration
for op in stage["Operations"]:
# lookup base workflow
operation = get_operation_by_name(op)
logger.info(json.dumps(operation, cls=DecimalEncoder))
stageAsl["States"][Name]["Branches"].append(
json.loads(operation["StateMachineAsl"]))
Configuration[op] = operation["Configuration"]
stageAslString = json.dumps(stageAsl)
stageAslString = stageAslString.replace("%%STAGE_NAME%%", stage["Name"])
stageAsl = json.loads(stageAslString)
logger.info(json.dumps(stageAsl))
stage["Configuration"] = Configuration
# Build stage
stage["Definition"] = json.dumps(stageAsl)
stage["Version"] = "v0"
stage["Id"] = str(uuid.uuid4())
stage["Created"] = str(datetime.now().timestamp())
stage["ResourceType"] = "STAGE"
stage["ApiVersion"] = API_VERSION
stage_table.put_item(Item=stage)
except ValidationError as e:
logger.error("got bad request error: {}".format(e))
raise BadRequestError(e)
except Exception as e:
logger.error("Exception {}".format(e))
stage = None
raise ChaliceViewError("Exception '%s'" % e)
return stage
@app.route('/workflow/stage', cors=True, methods=['PUT'], authorizer=authorizer)
def update_stage():
""" Update a stage NOT IMPLEMENTED
XXX
"""
stage = {"Message": "NOT IMPLEMENTED"}
return stage
@app.route('/workflow/stage', cors=True, methods=['GET'], authorizer=authorizer)
def list_stages():
""" List all stage defintions
Returns:
A list of operation definitions.
Raises:
200: All operations returned sucessfully.
500: ChaliceViewError - internal server error
"""
table = DYNAMO_RESOURCE.Table(STAGE_TABLE_NAME)
response = table.scan()
stages = response['Items']
while 'LastEvaluatedKey' in response:
response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])
stages.extend(response['Items'])
return stages
@app.route('/workflow/stage/{Name}', cors=True, methods=['GET'], authorizer=authorizer)
def get_stage_by_name(Name):
""" Get a stage definition by name
Returns:
A dictionary containing the stage definition.
Raises:
200: All stages returned successfully.
404: Not found
500: Internal server error
"""
stage_table = DYNAMO_RESOURCE.Table(STAGE_TABLE_NAME)
stage = None
response = stage_table.get_item(
Key={
'Name': Name
})
if "Item" in response:
stage = response["Item"]
else:
raise NotFoundError(
"Exception: stage '%s' not found" % Name)
return stage
@app.route('/workflow/stage/{Name}', cors=True, methods=['DELETE'], authorizer=authorizer)
def delete_stage_api(Name):
""" Delete a stage
Returns:
A dictionary containing the stage definition.
Raises:
200: Stage deleted successfully.
400: Bad Request - there are dependent workflows and query parameter force=False
404: Not found
500: ChaliceViewError - internal server error
"""
Force = False
params = app.current_request.query_params
if params and "force" in params and params["force"] == "true":
Force = True
stage = delete_stage(Name, Force)
return stage
def delete_stage(Name, Force):
table = DYNAMO_RESOURCE.Table(STAGE_TABLE_NAME)
logger.info("delete_stage({},{})".format(Name, Force))
try:
stage = {}
response = table.get_item(
Key={
'Name': Name
},
ConsistentRead=True)
if "Item" in response:
workflows = list_workflows_by_stage(Name)
stage = response["Item"]
if len(workflows) != 0 and Force == False:
Message = """Dependent workflows were found for stage {}.
Either delete the dependent workflows or set the query parameter
force=true to delete the stage anyhow. Undeleted dependent workflows
will be kept but will contain the deleted definition of the stage. To
find the workflows that depend on a stage, use the following endpoint:
GET /workflow/list/stage/""".format(Name)
raise BadRequestError(Message)
response = table.delete_item(
Key={
'Name': Name
})
flag_stage_dependent_workflows(Name)
else:
stage["Message"] = "Warning: stage '{}' not found".format(Name)
except BadRequestError as e:
logger.error("got bad request error: {}".format(e))
raise
except Exception as e:
stage = None
logger.error("Exception {}".format(e))
raise ChaliceViewError("Exception: '%s'" % e)
return stage
def flag_stage_dependent_workflows(StageName):
try:
table = DYNAMO_RESOURCE.Table(WORKFLOW_TABLE_NAME)
workflows = list_workflows_by_stage(StageName)
for workflow in workflows:
result = table.update_item(
Key={
'Name': workflow["Name"]
},
UpdateExpression="SET StaleStages = list_append(StaleStages, :i)",
ExpressionAttributeValues={
':i': [StageName],
},
ReturnValues="UPDATED_NEW"
)
except Exception as e:
logger.error("Exception flagging workflows dependent on dropped stage {}".format(e))
raise ChaliceViewError("Exception: '%s'" % e)
return StageName
###############################################################################
# __ __ _ __ _
# \ \ / /__ _ __| | __/ _| | _____ _____
# \ \ /\ / / _ \| '__| |/ / |_| |/ _ \ \ /\ / / __|
# \ V V / (_) | | | <| _| | (_) \ V V /\__ \
# \_/\_/ \___/|_| |_|\_\_| |_|\___/ \_/\_/ |___/
#
###############################################################################
@app.route('/workflow', cors=True, methods=['POST'], authorizer=authorizer)
def create_workflow_api():
""" Create a workflow from a list of existing stages.
A workflow is a pipeline of stages that are executed sequentially to transform and
extract metadata for a set of MediaType objects. Each stage must contain either a
"Next" key indicating the next stage to execute or and "End" key indicating it
is the last stage.
Body:
.. code-block:: python
{
"Name": string,
"StartAt": string - name of starting stage,
"Stages": {
"stage-name": {
"Next": "string - name of next stage"
},
...,
"stage-name": {
"End": true
}
}
}
Returns:
A dict mapping keys to the corresponding workflow created including the
AWS resources used to execute each stage.
.. code-block:: python
{
"Name": string,
"StartAt": string - name of starting stage,
"Stages": {
"stage-name": {
"Resource": queueARN,
"StateMachine": stateMachineARN,
"Configuration": stageConfigurationObject,
"Next": "string - name of next stage"
},
...,
"stage-name": {
"Resource": queueARN,
"StateMachine": stateMachineARN,
"Configuration": stageConfigurationObject,
"End": true
}
}
}
Raises:
200: The workflow was created successfully.
400: Bad Request - one of the input stages was not found or was invalid
500: ChaliceViewError - internal server error
"""
workflow = json.loads(app.current_request.raw_body.decode())
logger.info(json.dumps(workflow))
return create_workflow("api", workflow)
def create_workflow(trigger, workflow):
try:
workflow_table = DYNAMO_RESOURCE.Table(WORKFLOW_TABLE_NAME)
workflow["Trigger"] = trigger
workflow["Operations"] = []
workflow["StaleOperations"] = []
workflow["StaleStages"] = []
workflow["Version"] = "v0"
workflow["Id"] = str(uuid.uuid4())
workflow["Created"] = str(datetime.now().timestamp())
workflow["Revisions"] = str(1)
workflow["ResourceType"] = "WORKFLOW"
workflow["ApiVersion"] = API_VERSION
logger.info(json.dumps(workflow))
# Validate inputs
checkRequiredInput("Name", workflow, "Workflow Definition")
checkRequiredInput("StartAt", workflow, "Workflow Definition")
checkRequiredInput("Stages", workflow, "Workflow Definition")
workflow = build_workflow(workflow)
# Build state machine
response = SFN_CLIENT.create_state_machine(
name=workflow["Name"] + "-" + STACK_SHORT_UUID,
definition=json.dumps(workflow["WorkflowAsl"]),
roleArn=STAGE_EXECUTION_ROLE,
tags=[
{
'key': 'environment',
'value': 'mie'
},
]
)
workflow.pop("WorkflowAsl")
workflow["StateMachineArn"] = response["stateMachineArn"]
workflow_table.put_item(
Item=workflow,
ConditionExpression="attribute_not_exists(#workflow_name)",
ExpressionAttributeNames={
'#workflow_name': "Name"
})
except ClientError as e:
# Ignore the ConditionalCheckFailedException, bubble up
# other exceptions.
if e.response['Error']['Code'] == 'ConditionalCheckFailedException':
raise ConflictError("Workflow with Name {} already exists".format(workflow["Name"]))
else:
raise
except Exception as e:
if "StateMachineArn" in workflow:
response = SFN_CLIENT.delete_state_machine(
workflow["StateMachineArn"]
)
logger.error("Exception {}".format(e))
workflow = None
| |
script
example_section_data.append(
Code(entries, "\n".join(item.out), ce_status)
)
if figname:
example_section_data.append(Fig(figname))
else:
assert isinstance(item.out, list)
example_section_data.append(Text("\n".join(item.out)))
# TODO: fix this if plt.close is not called and there is still a lingering figure.
fig_managers = executor.fig_man()
if len(fig_managers) != 0:
print(f"Unclosed figures in {qa}!!")
plt.close("all")
return processed_example_data(example_section_data), figs
def get_classes(code):
"""
Extract Pygments token classes names for given code block
"""
list(lex(code, PythonLexer()))
FMT = HtmlFormatter()
classes = [FMT.ttype2class.get(x) for x, y in lex(code, PythonLexer())]
classes = [c if c is not None else "" for c in classes]
return classes
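# For example (the exact class names depend on the installed Pygments version):
#
#   >>> get_classes("x = 1")
#   # -> one short CSS class per token, e.g. ['n', 'w', 'o', 'w', 'mi', 'w']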
def processed_example_data(example_section_data) -> Section:
"""this should be no-op on already ingested"""
new_example_section_data = Section()
for in_out in example_section_data:
type_ = in_out.__class__.__name__
# color examples with pygments classes
if type_ == "Text":
blocks = parse_rst_section(in_out.value)
for b in blocks:
new_example_section_data.append(b)
elif type_ == "Code":
in_ = in_out.entries
# assert len(in_[0]) == 3, len(in_[0])
if len(in_[0]) == 2:
text = "".join([x for x, y in in_])
classes = get_classes(text)
in_out.entries = [ii + (cc,) for ii, cc in zip(in_, classes)]
if type_ != "Text":
new_example_section_data.append(in_out)
return new_example_section_data
@lru_cache
def normalise_ref(ref):
"""
Consistently normalize references.
Refs are sometimes import paths, not fully qualified names, though type
inference in examples regularly gives us fully qualified names. When visiting
a ref, this tries to import it and replace it by the normal full-qualified form.
This is expensive, and we likely want to move the logic of finding the
correct ref earlier in the process and use this as an assertion that the refs are
normalized.
It is critical to normalize in order to have the correct information when
using interactive ?/??, or similar inspectors of live objects.
"""
if ref.startswith(("builtins.", "__main__")):
return ref
try:
mod_name, name = ref.rsplit(".", maxsplit=1)
mod = __import__(mod_name)
for sub in mod_name.split(".")[1:]:
mod = getattr(mod, sub)
obj = getattr(mod, name)
if isinstance(obj, ModuleType):
return ref
if getattr(obj, "__name__", None) is None:
return ref
return obj.__module__ + "." + obj.__name__
except Exception:
pass
return ref
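# For example (illustrative only -- the normalised form depends on how the target
# library re-exports its objects):
#
#   >>> normalise_ref("numpy.core.fromnumeric.sum")
#   'numpy.sum'
#   >>> normalise_ref("builtins.print")
#   'builtins.print'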
@dataclass
class Config:
# we might want to suppress progress / rich output as it interferes with ipdb.
dummy_progress: bool = False
# Do not actually touch disk
dry_run: bool = False
exec_failure: Optional[str] = None # should move to enum
jedi_failure_mode: Optional[str] = None # move to enum ?
logo: Optional[str] = None # should change to path likely
execute_exclude_patterns: Sequence[str] = ()
infer: bool = True
exclude: Sequence[str] = () # list of dotted object name to exclude from collection
examples_folder: Optional[str] = None # < to path ?
submodules: Sequence[str] = ()
exec: bool = False
source: Optional[str] = None
homepage: Optional[str] = None
docs: Optional[str] = None
docs_path: Optional[str] = None
wait_for_plt_show: Optional[bool] = True
examples_exclude: Sequence[str] = ()
exclude_jedi: Sequence[str] = ()
implied_imports: Dict[str, str] = dataclasses.field(default_factory=dict)
expected_errors: Dict[str, List[str]] = dataclasses.field(default_factory=dict)
early_error: bool = True
def replace(self, **kwargs):
return dataclasses.replace(self, **kwargs)
def load_configuration(path: str) -> Tuple[str, MutableMapping[str, Any]]:
"""
Given a path, load a configuration from a File.
"""
conffile = Path(path).expanduser()
if conffile.exists():
conf: MutableMapping[str, Any] = toml.loads(conffile.read_text())
assert len(conf.keys()) == 1
root = next(iter(conf.keys()))
return root, conf[root]
else:
sys.exit(f"{conffile!r} does not exists.")
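# A minimal configuration file accepted by load_configuration / gen_main might look
# like this (the field names mirror the Config dataclass above; the [numpy] root key
# is just an example target library):
#
#   [numpy]
#   exec = true
#   infer = true
#   submodules = ["fft", "linalg"]
#   exclude = ["numpy.distutils"]
#   docs_path = "~/dev/numpy/doc/source"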
def gen_main(
infer: Optional[bool],
exec_: Optional[bool],
target_file: str,
debug,
*,
dummy_progress: bool,
dry_run=bool,
api: bool,
examples: bool,
fail,
narrative,
) -> None:
"""
Main entry point to generate docbundle files,
This will take care of reading single configuration file with the option
for the library you want to build the docs for, scrape API, narrative and
examples, and put it into a doc bundle for later consumption.
Parameters
----------
infer : bool | None
CLI override of whether to run type inference on examples
exec_ : bool | None
CLI override of whether to execute examples/code blocks
target_file : str
Path of the configuration file
dummy_progress : bool
CLI flag to disable the progress display, which can interfere with ipdb
formatting when debugging.
api : bool
CLI override of whether to build api docs
examples : bool
CLI override of whether to build examples docs
fail
TBD
narrative : bool
CLI override of whether to build narrative docs
dry_run : bool
don't write to disk
debug : bool
set log level to debug
Returns
-------
None
"""
target_module_name, conf = load_configuration(target_file)
config = Config(**conf, dry_run=dry_run, dummy_progress=dummy_progress)
if exec_ is not None:
config.exec = exec_
if infer is not None:
config.infer = infer
target_dir = Path("~/.papyri/data").expanduser()
if not target_dir.exists() and not config.dry_run:
target_dir.mkdir(parents=True, exist_ok=True)
if dry_run:
temp_dir = tempfile.TemporaryDirectory()
target_dir = Path(temp_dir.name)
g = Gen(dummy_progress=dummy_progress, config=config)
g.log.info("Will write data to %s", target_dir)
if debug:
g.log.setLevel("DEBUG")
g.log.debug("Log level set to debug")
g.collect_package_metadata(
target_module_name,
relative_dir=Path(target_file).parent,
)
if examples:
g.collect_examples_out()
if api:
g.collect_api_docs(target_module_name)
if narrative:
g.collect_narrative_docs()
p = target_dir / (g.root + "_" + g.version)
p.mkdir(exist_ok=True)
g.log.info("Saving current Doc bundle to %s", p)
g.clean(p)
g.write(p)
if dry_run:
temp_dir.cleanup()
def full_qual(obj):
if isinstance(obj, ModuleType):
return obj.__name__
else:
try:
if hasattr(obj, "__qualname__") and (
getattr(obj, "__module__", None) is not None
):
return obj.__module__ + "." + obj.__qualname__
elif hasattr(obj, "__name__") and (
getattr(obj, "__module__", None) is not None
):
return obj.__module__ + "." + obj.__name__
except Exception:
pass
return None
return None
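# Usage sketch (assuming numpy is importable; values are illustrative):
#   full_qual(numpy)          # -> "numpy"          (ModuleType branch)
#   full_qual(numpy.ndarray)  # -> "numpy.ndarray"  (__qualname__ branch)
#   full_qual(42)             # -> None             (no __qualname__/__name__)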
class DFSCollector:
"""
Depth first search collector.
Will scan documentation to find all reachable items in the namespace
of our root object (we don't want to go scan other libraries).
There were some issues with the BFS collector originally; I'm not sure I remember what.
"""
def __init__(self, root, others):
"""
Parameters
----------
root
Base object, typically module we want to scan itself.
We will attempt to not scan any object which does not belong
to the root or one of its children.
others
List of other objects to use as a base to explore the object graph.
Typically this is because some packages do not import some
submodules by default, so we need to pass these submodules
explicitly.
"""
assert isinstance(root, ModuleType), root
self.root = root.__name__
assert "." not in self.root
self.obj: Dict[str, Any] = dict()
self.aliases = defaultdict(lambda: [])
self._open_list = [(root, [root.__name__])]
for o in others:
self._open_list.append((o, o.__name__.split(".")))
def scan(self) -> None:
"""
Attempt to find all objects.
"""
while len(self._open_list) >= 1:
current, stack = self._open_list.pop(0)
# numpy objects have no bool values.
if id(current) not in [id(x) for x in self.obj.values()]:
self.visit(current, stack)
def prune(self) -> None:
"""
Some objects can be reached many times via multiple paths.
We try to remove the duplicate paths we use to reach given objects.
Notes
-----
At some point we might want to save all objects aliases,
in order to extract the canonical import name (visible to users),
and to resolve references.
"""
for qa, item in list(self.obj.items()):
if (nqa := full_qual(item)) != qa:
print("after import qa differs : {qa} -> {nqa}")
if self.obj[nqa] == item:
print("present twice")
del self.obj[nqa]
else:
print("differs: {item} != {other}")
def items(self) -> Dict[str, Any]:
self.scan()
self.prune()
return self.obj
def visit(self, obj, stack):
"""
Recursively visit Modules, Classes, and Functions, tracking which path
we took to get there.
"""
try:
qa = full_qual(obj)
except Exception as e:
raise RuntimeError(f"error visiting {'.'.join(self.stack)}") from e
if not qa:
if (
"__doc__" not in stack
and hasattr(obj, "__doc__")
and not full_qual(type(obj)).startswith("builtins.")
):
# might be worth looking into like np.exp.
pass
return
if not qa.split(".")[0] == self.root:
return
if obj in self.obj.values():
return
if (qa in self.obj) and self.obj[qa] != obj:
pass
self.obj[qa] = obj
self.aliases[qa].append(".".join(stack))
if isinstance(obj, ModuleType):
return self.visit_ModuleType(obj, stack)
elif isinstance(obj, FunctionType):
return self.visit_FunctionType(obj, stack)
elif isinstance(obj, type):
return self.visit_ClassType(obj, stack)
else:
pass
def visit_ModuleType(self, mod, stack):
for k in dir(mod):
# TODO: scipy 1.8 workaround, remove.
if not hasattr(mod, k):
print(f"scipy 1.8 workround : ({mod.__name__!r},{k!r}),")
continue
self._open_list.append((getattr(mod, k), stack + [k]))
def visit_ClassType(self, klass, stack):
for k, v in klass.__dict__.items():
self._open_list.append((v, stack + [k]))
def visit_FunctionType(self, fun, stack):
pass
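# Minimal usage sketch (module names are illustrative):
#   import scipy, scipy.sparse
#   collector = DFSCollector(scipy, [scipy.sparse])
#   items = collector.items()      # {"scipy.sparse.csr_matrix": <class ...>, ...}
#   aliases = collector.aliases    # every dotted path used to reach each object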
class DocBlob(Node):
"""
An object containing information about the documentation of an arbitrary object.
Instead of a DocBlob being a NumpyDocString, I'm thinking of them having a NumpyDocString.
This helps with
script will be completely wiped, and replaced with the new one.
"""
def __init__(self):
self.version = 5.0
def check_update(self):
"""Sends the request to the github repository, and checks to see if the script needs and update."""
print(SquidNet.logo.fget())
print("[+] Checking for updates.....")
version = self.version - 1.0
updated = False
try:
req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/SquidNet2Version.json")
recv = urllib.request.urlopen(req).read().decode()
version_info = open("SquidNet2Version.json","w")
version_info.write(recv)
version_info.close()
json_info = json.load(open(version_info.name,"r"))
version = float(json_info[0]["SquidNet2"])
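# (Assumed format) the JSON fetched above is a list containing one mapping,
# e.g. [{"SquidNet2": "5.0"}], so json_info[0]["SquidNet2"] yields the latest
# released version number.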
except:
print("[+] There was an error with checking updates, starting SquidNet2.")
if version > self.version:
print(f"[+] Your Version of SquidNet2 is outdated. You have version {self.version}, whereas the current update is version v{version}.")
if sys.argv[0].endswith(".py"):
update = input("\n[+] Do you wish to update?(y/n): ").lower()
if update == "y" or update == "yes":
print(f"[+] Updating SquidNet2 to v{version}")
updated = True
req = urllib.request.Request(url="https://raw.githubusercontent.com/DrSquidX/SquidNet2/main/MainScripts/SquidNet2.py")
resp = urllib.request.urlopen(req).read()
file = open(sys.argv[0],"wb")
file.write(resp)
file.close()
else:
print("[+] Choosing not to update.")
else:
updated = False
print("[+] Not updating due to the file not being a '.py'.\n[+] Starting SquidNet2 in 3 seconds.....")
time.sleep(3)
if not updated:
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
Squidnet = Config(self.version)
else:
print("[+] Restart the Script to have the Update be effective!")
class Config:
"""
# Configuration
This class is needed for configuring the settings that allow the server to function properly.
There are 2 choices of configuration: Option-Parsing and the usage of a Config file."""
def __init__(self, version):
self.version = version
self.config_file = "server.config"
self.filearg = sys.argv[0].split("/")[-1]
if ".py" in self.filearg:
self.filearg = f"python3 {self.filearg}"
self.parse_args()
def information(self):
print(f"""[+] SquidNet2: The Sequel to SquidNet that nobody asked for, but everyone needed.
[+] Written in Python 3.8.3
[+] Why SquidNet2?
SquidNet2 offers all of the features (except for SSH; just use SquidNetSSH) that the original had,
but better. One prime example is the significantly improved web interface, along with many others like
more security and more stability. There are more functions built on top of the original,
and more is achievable with SquidNet2 compared to SquidNet.
[+] The SquidNet2 Framework:
SquidNet2 - Server:
This script is the server part of the SquidNet2 framework. It is the foundation and handler of
all the bots and admin connections. It acts as a command and control server, where the bots connect
to this server, and the admin connects as well, to then execute commands on this server. This
is so that the admins can communicate and connect to the server wherever and whenever they want,
as long as the Server itself is up.
SquidNet2 - Admin:
While this acts as the handler to ensure control of remote computers, there still needs to be an admin
that is able to remotely execute commands on the bot computers. This is where the admin comes into
play. The admin connects to the server, and logs into the admin account that has been configured
when the server had started. Once authentication is complete, the admin will have access to the server
and all of the bots that are connected to it.
SquidNet2 - Bots:
The bots are the victim computers that will unknowingly connect to the SquidNet2 server. There is a
payload that is automatically generated by the server that can then be run by victim computers and
connect to that server specifically. There are numerous commands that are built into the payload,
which the admin of the server can run them to extract information or run commands remotely on those
computers. These bots can also run shell commands, if there are not any commands being sent that are
part of the in-built commands that the payload provides.
[+] Usefulness and function of SquidNet2:
- Remotely accessing lost computers
- Taking control of other people's computers(illegal without consent)
- Penetration Testing
- Impressive
- Lots of options for overall better experience
[+] Risks:
- Being careless and taking control of computers, which is illegal.
- Server might not be up to security standards(need to improve authentication)
[+] Topology of the SquidNet2 Framework:
_________
| | - Admin
| Admin | Sends commands to the server.
|_______| The admin also recieves messages
from the server with information
^ regarding command output, or other
| important info on the server's
| status.
V
____________
| | - Server
| Server | Recieves the Admin's instruction
| | and sends it to the bots
|__________| It also recieves bot output and sends
^ ^ ^ it to the admin.
/ | \\
/ | \\
V V V
_________ _________ _________ - Bots
| | | | | | Recieves the command via the server,
| Bot | | Bot | | Bot | executes it and sends any output back.
|_______| |_______| |_______| They are being remotely controlled.
[+] Key:
<-->(arrows) - Indicate direction of messages and packets
'-' - Notations
[+] Features:
Web Interface:
A web interface that cleanly shows information about the bots in
nice tables, with the additional ability to run commands on the
bots via the web interface, and more. There is
information displayed that shows the settings and configuration of
the server, giving the user information that shows what the server
is using to function.
Options for Server configuration:
There is the ability to use the option-parsing that most scripts
use, or to use a configuration file that allows for quick and
easy configuration, and allows the server to be started quicker
without needing to constantly type the same credentials over and
over again.
Numerous Commands:
{SquidNet.help_msg.fget(None).replace("[+]"," ").replace("[(SERVER)]: Info about each command:"," ").replace("!"," !").strip()}
[+] For your information:
Read the github README.md file for more information on the SquidNet2 framework.
This script was made for educational purposes only. The developer is not
responsible for any damages done by any illegal use of this script.
This script follows this license (MIT):
''' MIT License
Copyright (c) 2022 DrSquidX
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
Thank you for agreeing with these terms.
[+] Overall:
SquidNet2 is the far superior version of SquidNet, with many features that were
reused and improved upon, as well as many new features being added to increase the
stability and function of the framework.
[+] Happy (Ethical) Hacking! - DrSquid
""")
sys.exit()
def help_msg(self):
print(f"""
Usage: {self.filearg} [options..]
Options:
-h, --help Show this help message and exit.
--ip, --ipaddr The IP Address that the server will bind to.(required)
--p, --port The port that will bind to the IP address.(default:
8080)
--eip, --externalip The external IP that Bots will connect to.(default:
opt.ip)
--ep, --externalport The external Port that Bots will connect to.(default:
opt.p)
--ek, --enckey The encryption key used on the bots for file
encryption.(default:
b'<KEY>
--l, --logfile The file used for server logging.(default: log.txt)
--au, --adminuser The username for the admin account.(default: admin)
--ap, --adminpass The password for the admin account.(default:
<PASSWORD>)
--fd, --ftpdir
are used rule elements.
Constant groups et cetera
"""
@staticmethod
def single_position():
""" Single position N.
0...9 for 0...9
A...Z for 10...35
* for max_length
- for (max_length - 1)
+ for (max_length + 1)
a...k user-defined numeric variables (with the "v" command)
l initial or updated word's length (updated whenever "v" is used)
m initial or memorized word's last character position
p position of the character last found with the "/" or "%" commands
z "infinite" position or length (beyond end of word)
"""
if RUNTIME_CONFIG.is_jtr():
return Word(jtr_numeric_constants, exact=1)
elif RUNTIME_CONFIG.is_hc():
return Word(hc_numeric_constants, exact=1)
else:
raise FatalRuntimeError(
"Unknown RUNTIME_CONFIG['running_style'] Type: {}".format(
RUNTIME_CONFIG['running_style']))
@staticmethod
@jtr_only_func
def in_bracket_position():
""" Valid positions that can appear in [], only valid in JtR mode
Example:
A[1-3A-B]"ab"
"""
# the chars that must be escaped in a range. "-" is valid position in JtR.
must_escaped_chars = "-"
# first, purge all must_escaped_chars from jtr_numeric_constants
purged = jtr_numeric_constants
for c in must_escaped_chars:
purged = purged.replace(c, "")
# next, add the escaped version
valid_singles = Word(purged, exact=1)
for c in must_escaped_chars:
valid_singles = Literal("\\" + c) | valid_singles
# valid position ranges using a dash: A-Z, 0-9
valid_ranges = Word(jtr_numeric_constants_dash_allowed, exact=1) + \
Literal("-") + Word(jtr_numeric_constants_dash_allowed, exact=1)
return valid_ranges | valid_singles
@staticmethod
def positions_in_bracket():
""" add [] to valid in_bracket_position"""
# combine both
return Elements._add_brackets(Groups.in_bracket_position())
@staticmethod
def single_char():
""" Valid single char X"""
initial_chars = printables
# Additional requirements for JTR.
if RUNTIME_CONFIG.is_jtr():
# purge the must escape chars
for c in jtr_must_escape_for_single_char:
initial_chars = initial_chars.replace(c, "")
escaped_valid_chars = Word(initial_chars, exact=1)
# escape must escape chars.
for c in jtr_must_escape_for_single_char:
escaped_valid_chars = Literal("\\" + c) | escaped_valid_chars
# add could escape chars.
for c in jtr_could_escape_for_single_char:
escaped_valid_chars = Literal("\\" + c) | escaped_valid_chars
else:
escaped_valid_chars = Word(initial_chars, exact=1)
# Consider space
return escaped_valid_chars | White(" ", max=1)
@staticmethod
def single_char_for_char_class():
""" Valid single char X that considers character class"""
initial_chars = printables
if RUNTIME_CONFIG.is_jtr():
for c in "?" + jtr_must_escape_for_single_char:
initial_chars = initial_chars.replace(c, "")
valid_single_char = Word(initial_chars, exact=1)
for c in jtr_must_escape_for_single_char:
valid_single_char = Literal("\\" + c) | valid_single_char
else:
valid_single_char = Word(initial_chars, exact=1)
# Consider space
valid_single_char = valid_single_char | White(" ", max=1)
return valid_single_char
@staticmethod
def range_char_for_char_class():
""" Valid range char [X] that considers character class"""
initial_chars = printables
if RUNTIME_CONFIG.is_jtr():
for c in "?" + jtr_must_escape_for_range:
initial_chars = initial_chars.replace(c, "")
valid_in_bracket_char = Word(initial_chars, exact=1)
for c in jtr_must_escape_for_range:
valid_in_bracket_char = Literal("\\" +
c) | valid_in_bracket_char
else:
valid_in_bracket_char = Word(initial_chars, exact=1)
# Consider space
valid_in_bracket_char = valid_in_bracket_char | White(" ", max=1)
return valid_in_bracket_char
@staticmethod
def range_char_for_char_class_in_bracket():
""" add brackets (aka add '[]') to a group of chars """
return Elements._add_brackets(Groups.range_char_for_char_class())
@staticmethod
def single_class():
""" character class """
return Word(CHAR_CLASSES, exact=1)
@staticmethod
def class_range():
""" character class in range"""
return Groups.single_class()
@staticmethod
def class_range_in_bracket():
""" character class in range with parallel"""
return Elements._add_brackets(Groups.class_range())
@staticmethod
@jtr_only_func
def in_bracket_char():
""" A range of chars that can appear in [], only valid in JtR
To parse a range, unlike single_char, we don't parse chars separately; we read the range as a whole.
At this stage you just need to capture [], that's it.
So it should be Literal("[") + allchar(replace"]") + Literal("]")
"""
# Escape ]
initial_chars = printables
for c in jtr_must_escape_for_range:
initial_chars = initial_chars.replace(c, "")
valid_in_bracket_char = Word(initial_chars, exact=1)
for c in jtr_must_escape_for_range:
valid_in_bracket_char = Literal("\\" + c) | valid_in_bracket_char
# Consider space
valid_in_bracket_char = valid_in_bracket_char | White(" ", max=1)
return valid_in_bracket_char
@staticmethod
@jtr_only_func
def chars_in_bracket():
r""" add [] to valid in_bracket_char """
return Elements._add_brackets(Groups.in_bracket_char())
@staticmethod
def single_char_append():
""" Valid single char X in Az"", only valid in JtR """
# Remove " from chars
initial_chars = printables.replace('"', "")
# Escape [ AND \
for c in jtr_must_escape_for_single_char:
initial_chars = initial_chars.replace(c, "")
escaped_valid_chars = Word(initial_chars, exact=1)
for c in jtr_must_escape_for_single_char:
escaped_valid_chars = Literal("\\" + c) | escaped_valid_chars
for c in jtr_could_escape_for_single_char:
escaped_valid_chars = Literal("\\" + c) | escaped_valid_chars
# Consider space
escaped_valid_chars = escaped_valid_chars | White(" ", max=1)
return escaped_valid_chars
@staticmethod
@jtr_only_func
def in_bracket_char_append():
""" A range that appears in Az"[]", The difference is " is not allowed
To parse a range, unlike single_char, we don't parse chars separately; we read the range as a whole.
"""
# Note: Remove " from strings, its illegal to have it in quotes
initial_chars = printables.replace('"', "")
for c in jtr_must_escape_for_range:
initial_chars = initial_chars.replace(c, "")
valid_single_char = Word(initial_chars, exact=1)
for c in jtr_must_escape_for_range:
valid_single_char = Literal("\\" + c) | valid_single_char
# Consider space
valid_single_char = valid_single_char | White(" ", max=1)
return valid_single_char
@staticmethod
@jtr_only_func
def chars_append_in_bracket():
r""" add [] to valid in_bracket_char_append """
return Elements._add_brackets(Groups.in_bracket_char_append())
@staticmethod
def get_all_possible(char_type):
""" Get all possible chars/positions. Specify what type do you want.
If type == JtR, need to consider ranges and parallelism.
"""
if char_type == "char":
all_values = Groups.single_char()
if RUNTIME_CONFIG.is_jtr():
in_bracket_char = Groups.chars_in_bracket()
slash_p_range_char = Elements._create_slash_parallel_cmds(
in_bracket_char)
slash_number = Elements._create_slash_number_cmds()
all_values = slash_number | slash_p_range_char | in_bracket_char | all_values
elif char_type == "char_append":
all_values = Groups.single_char_append()
if RUNTIME_CONFIG.is_jtr():
in_bracket_char_append = Groups.chars_append_in_bracket()
slash_p_range_char_append = Elements._create_slash_parallel_cmds(
in_bracket_char_append)
slash_number = Elements._create_slash_number_cmds()
all_values = slash_number | slash_p_range_char_append | in_bracket_char_append | all_values
elif char_type == "char_for_class":
all_values = Groups.single_char_for_char_class()
if RUNTIME_CONFIG.is_jtr(): # JTR supports
in_bracket_char_for_class = Groups.range_char_for_char_class_in_bracket(
)
slash_p_in_bracket_char_class = Elements._create_slash_parallel_cmds(
in_bracket_char_for_class)
slash_number = Elements._create_slash_number_cmds()
all_values = slash_number | in_bracket_char_for_class | slash_p_in_bracket_char_class | all_values
elif char_type == "class":
all_values = Groups.single_class()
if RUNTIME_CONFIG.is_jtr(): # JTR supports
in_bracket_class = Groups.class_range_in_bracket()
slash_p_in_bracket_class = Elements._create_slash_parallel_cmds(
in_bracket_class)
slash_number = Elements._create_slash_number_cmds()
all_values = slash_number | in_bracket_class | slash_p_in_bracket_class | all_values
elif char_type == "simple_position":
all_values = Groups.single_position()
if RUNTIME_CONFIG.is_jtr():
in_bracket_position = Groups.positions_in_bracket()
slash_p_in_bracket_position = Elements._create_slash_parallel_cmds(
in_bracket_position)
slash_number = Elements._create_slash_number_cmds()
all_values = slash_number | slash_p_in_bracket_position | in_bracket_position | all_values
else:
raise Exception("Unknown Char_Class")
return all_values
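# Sketch of what get_all_possible("char") accepts in JtR mode, based on the
# combinations above: a plain or escaped single character ("a", "\["), a
# bracketed set ("[abc]"), a parallel range ("\p[abc]", "\p1[abc]", "\r[abc]"),
# or a back-reference to an earlier range ("\1").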
@staticmethod
@jtr_only_func
def character_classes_group():
""" All character classes
?? matches "?"
?v matches vowels: "aeiouAEIOU"
?c matches consonants: "bcdfghjklmnpqrstvwxyzBCDFGHJKLMNPQRSTVWXYZ"
?w matches whitespace: space and horizontal tabulation characters
?p matches punctuation: ".,:;'?!`" and the double quote character
?s matches symbols "$%^&*()-_+=|\<>[]{}#@/~"
?l matches lowercase letters [a-z]
?u matches uppercase letters [A-Z]
?d matches digits [0-9]
?a matches letters [a-zA-Z]
?x matches letters and digits [a-zA-Z0-9]
?z matches all characters
"""
all_chars = Groups.get_all_possible("class")
return Literal('?') + all_chars
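# Usage sketch: in JtR mode this element matches tokens such as "?d" (digits),
# "?v" (vowels), or "?[lu]" (a bracketed set of classes); the leading "?" is
# consumed and the class part comes from get_all_possible("class").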
class Elements():
"""This class creates a parser capable for the JTR Rule language."""
@staticmethod
@jtr_only_func
def _add_brackets(cmds):
"""Add brackets to commands, and accept one/more cmds inside the bracket. JTR Only
This function does not modify cmds or add dash to cmds.
"""
if not isinstance(cmds, MatchFirst) and not isinstance(cmds, Word):
raise Exception("Wrong Usage of func _create_group_of_cmds")
if cmds.matches("]"):
raise Exception("Cores should escape brackets {}".format(cmds))
return Combine(Literal("[") + OneOrMore(cmds) + Literal("]"))
@staticmethod
@jtr_only_func
def _create_slash_parallel_cmds(parallel_cmds):
"""Add \p, \p1-\p9, \r to a cmd range ([cmds]). JTR Only"""
slash = ZeroOrMore(
Literal("\p") + Word(nums, exact=1) | Literal("\p") |
Literal("\\r"))
slash_cmd = Combine(slash + parallel_cmds)
return slash_cmd
@staticmethod
@jtr_only_func
def _create_slash_number_cmds():
"""Create \0-\9, which refers to previous ranges. JTR Only"""
slash_num = Combine(Literal("\\") + Word(nums, exact=1))
return slash_num
@staticmethod
def reject_flags():
""" Parse Rejection Flags. JTR Only
-: no-op: don't reject
-c reject this rule unless current hash type is case-sensitive
-8 reject this rule unless current hash type uses 8-bit characters
-s reject this rule unless some password hashes were split at loading
-p reject this rule unless word pair commands are currently allowed
->N reject this rule unless length N or longer is supported
-<N reject this rule unless length N or shorter is supported
"""
if RUNTIME_CONFIG[
'running_style'] != RunningStyle.JTR: # Only used in JTR
return Empty()
str_reject_flags_prefix = "-"
str_reject_flags_cores = "c8sp:"
str_reject_flags_length = "<>"
word_reject_flags_prefix = Word(str_reject_flags_prefix, exact=1)
word_reject_flags_cores = Word(str_reject_flags_cores, exact=1)
word_reject_flags_length = Word(str_reject_flags_length, exact=1)
simple_reject_flags = Combine(word_reject_flags_prefix +
word_reject_flags_cores)
simple_reject_flags_length = Combine(
word_reject_flags_prefix + word_reject_flags_length +
Word(jtr_numeric_constants_dash_allowed, exact=1))
# -[:c]
parallel_reject_flags = Combine(word_reject_flags_prefix + Elements.
_add_brackets(word_reject_flags_cores))
# -\r\p[:c]
parallel_reject_flags_slash = Combine(
word_reject_flags_prefix + Elements._create_slash_parallel_cmds(
Elements._add_brackets(word_reject_flags_cores)))
# ->8, -<7
from io import BytesIO
import os
from collections import OrderedDict
from typing import List
from pathlib import Path
import random
from sslib.bzs import parseBzs, buildBzs
import nlzss11
from sslib.u8file import U8File
from sslib import parseMSB, buildMSB, Patcher, AllPatcher
EXTRACT_ROOT_PATH='actual-extract'
MODIFIED_ROOT_PATH='modified-extract'
extracts={
('D003_0', 0): ['oarc/GetTriForceSingle.arc'], # Triforce part
('D301', 0): ['oarc/GetBowA.arc'], # Bow
('F001r', 3):[
'oarc/GetKobunALetter.arc', # Cawlin's Letter
'oarc/GetPouchA.arc' # Adventure Pouch
],
('F002r', 1):[
'oarc/GetPouchB.arc', # Extra Pouch Slot
'oarc/GetMedal.arc', # all Medals
'oarc/GetNetA.arc' # Bug Net
],
('F004r', 0):[
'oarc/GetPachinkoB.arc', # Scattershot
'oarc/GetBowB.arc', # Iron Bow
'oarc/GetBowC.arc', # Sacred Bow
'oarc/GetBeetleC.arc', # Quick beetle
'oarc/GetBeetleD.arc', # Tough Beetle
'oarc/GetNetB.arc' # Big Bug Net
# a bunch more bottles and other stuff is also here
],
('F202', 1): [
'oarc/GetPachinkoA.arc', # slingshot
'oarc/GetHookShot.arc', # clawshots
'oarc/GetMoleGloveB.arc', # mogma mitts
'oarc/GetVacuum.arc', # gust bellows
'oarc/GetWhip.arc', # whip
'oarc/GetBombBag.arc' # bomb bag
],
('F210', 0):['oarc/GetMoleGloveA.arc'], # digging mitts
('S100', 2):['oarc/GetSizuku.arc'], # water dragon scale
('S200', 2):['oarc/GetEarring.arc'], # fireshield earrings
('D100', 1):['oarc/GetBeetleA.arc'], # beetle
('F300', 0):['oarc/GetBeetleB.arc'], # hook beetle
('F301_5', 0):['oarc/GetMapSea.arc'], # Sand Sea Map
('F402', 2):['oarc/GetHarp.arc'], # all Songs & Harp
('F000', 0):[
'oarc/MoldoGut_Baby.arc', # baby's rattle
'oarc/GetSeedLife.arc' # LTS
],
('F000', 4):[
'oarc/GetShieldWood.arc', # wooden shield
'oarc/GetShieldHylia.arc' # hylian shield
],
('F100', 3):[ # stuff for silent realms
'oarc/PLHarpPlay.arc',
'oarc/SirenEntrance.arc',
'oarc/PLSwordStick.arc'
],
('F020', 1):['oarc/GetBirdStatue.arc'], # Bird statuette
('F023', 0):['oarc/GetTerryCage.arc'], # Beedle's Beetle
}
def get_stagepath(stage: str, layer: int=0, rootpath: str=EXTRACT_ROOT_PATH) -> Path:
return Path(__file__).parent / rootpath / 'DATA' / 'files' / 'Stage' / stage / f'{stage}_stg_l{layer}.arc.LZ'
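# Example (values are illustrative): get_stagepath('F000', 4) resolves to
#   <this script's directory>/actual-extract/DATA/files/Stage/F000/F000_stg_l4.arc.LZ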
def extract_objects():
try:
os.mkdir('oarc')
except:
pass
for (file, layer), objs in extracts.items():
with get_stagepath(file, layer).open('rb') as f:
data=nlzss11.decompress(f.read())
data=BytesIO(data)
data=U8File.parse_u8(data)
for objname in objs:
outdata=data.get_file_data(objname)
with open(objname,'wb') as out:
out.write(outdata)
def get_names():
with open(EXTRACT_ROOT_PATH+'/DATA/files/Stage/F000/F000_stg_l4.arc.LZ','rb') as f:
data=nlzss11.decompress(f.read())
data=BytesIO(data)
data=U8File.parse_u8(data)
# room=data.get_data('rarc/D000_r00.arc:dat/room.bzs')
for arc in data.get_all_paths():
if arc.endswith('.arc'):
print(arc)
# print(data._get_subarc(arc).get_all_paths_recursive())
def testpatch():
# open the skyloft cave file
with open(EXTRACT_ROOT_PATH+'/DATA/files/Stage/D000/D000_stg_l0.arc.LZ','rb') as f:
# extract in memory
data=nlzss11.decompress(f.read())
data=BytesIO(data)
data=U8File.parse_u8(data)
# add hookshot and gust bellows
with open('oarc/GetHookShot.arc','rb') as h:
data.add_file_data('oarc/GetHookShot.arc', h.read())
with open('oarc/GetVacuum.arc','rb') as h:
data.add_file_data('oarc/GetVacuum.arc', h.read())
# open room
# room=parse_bzs(data.get_data('rarc/D000_r00.arc:dat/room.bzs'))
# get objects
# objects=room.children['LAY '].layers[0].children['OBJS'].objects
# find chest with id 68 and replace the content with hookshot
# for obj in objects:
# if obj['name']==b'TBox\x00\x00\x00\x00':
# if (obj['talk_behaviour']&0xF700)>>9 == 64:
# obj['talk_behaviour']==(obj['talk_behaviour']&0xF700)+0x14
# print('patched hookshot')
# for obj in objects:
# if obj['name']==b'TBox\x00\x00\x00\x00':
# if (obj['talk_behaviour']&0xF700)>>9 == 67:
# obj['talk_behaviour']==(obj['talk_behaviour']&0xF700)+0x31
# print('patched gust bellows')
# write room back
# data.update_file('rarc/D000_r00.arc:dat/room.bzs',build_bzs(room))
# write stage to memory
# with open('D000_stg_l0.arc', 'wb') as o:
# data.writeto(o)
return data
def testpatch2():
with open(f'{EXTRACT_ROOT_PATH}/DATA/files/Stage/D000/D000_stg_l0.arc.LZ','rb') as f:
extracted_data=nlzss11.decompress(f.read())
stagearc=U8File.parse_u8(BytesIO(extracted_data))
roomarc=U8File.parse_u8(BytesIO(stagearc.get_file_data(f'rarc/D000_r00.arc')))
room=parseBzs(roomarc.get_file_data('dat/room.bzs'))
objects=room['LAY ']['l0']['OBJS']
# find chest with id 68 and replace the content with hookshot
for obj in objects:
if obj['name']==b'TBox\x00\x00\x00\x00':
if (obj['unk4']&0xFE00)>>9 == 68:
obj['posy']=obj['posy']+50
obj['unk4']=(obj['unk4']&0xFE00)+0x14
print('patched hookshot')
if (obj['unk4']&0xFE00)>>9 == 67:
obj['posy']=obj['posy']+50
obj['unk4']=(obj['unk4']&0xFE00)+0x31
print('patched gust bellows')
roomarc.set_file_data('dat/room.bzs', buildBzs(room))
stagearc.set_file_data('rarc/D000_r00.arc', roomarc.to_buffer())
# add gust bellows and hookshot oarcs so they properly work
with open('oarc/GetHookShot.arc','rb') as f:
arc=f.read()
stagearc.add_file_data('oarc/GetHookShot.arc', arc)
with open('oarc/GetVacuum.arc','rb') as f:
arc=f.read()
stagearc.add_file_data('oarc/GetVacuum.arc', arc)
with open(f'{MODIFIED_ROOT_PATH}/DATA/files/Stage/D000/D000_stg_l0.arc.LZ','wb') as f:
f.write(nlzss11.compress(stagearc.to_buffer()))
def extract_stage_rooms(name: str) -> OrderedDict:
with open(f'{EXTRACT_ROOT_PATH}/DATA/files/Stage/{name}/{name}_stg_l0.arc.LZ','rb') as f:
extracted_data=nlzss11.decompress(f.read())
stagearc=U8File.parse_u8(BytesIO(extracted_data))
stage = parseBzs(stagearc.get_file_data('dat/stage.bzs'))
rooms = OrderedDict()
for i in range(len(stage['RMPL'])):
roomarc=U8File.parse_u8(BytesIO(stagearc.get_file_data(f'rarc/{name}_r{i:02}.arc')))
rooms[f'r{i:02}'] = parseBzs(roomarc.get_file_data('dat/room.bzs'))
return stage, rooms
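# Usage sketch: stagedef, rooms = extract_stage_rooms('D000'); rooms['r00'] is the
# parsed room.bzs of rarc/D000_r00.arc, with one entry per room listed in the
# stage's RMPL section.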
def upgrade_test():
# patch stage
with get_stagepath('D000',0).open('rb') as f:
extracted_data=nlzss11.decompress(f.read())
stagearc=U8File.parse_u8(BytesIO(extracted_data))
stagedef=parseBzs(stagearc.get_file_data('dat/stage.bzs'))
room0arc=U8File.parse_u8(BytesIO(stagearc.get_file_data('rarc/D000_r00.arc')))
roomdef=parseBzs(room0arc.get_file_data('dat/room.bzs'))
# get chest
chest=next(filter(lambda x: x['name']=='TBox', roomdef['LAY ']['l0']['OBJS']))
chest['anglez']=(chest['anglez']&~0x1FF) | 53 # Beetle
room0arc.set_file_data('dat/room.bzs',buildBzs(roomdef))
# add both beetle models
with open('oarc/GetBeetleA.arc','rb') as h:
stagearc.add_file_data('oarc/GetBeetleA.arc', h.read())
with open('oarc/GetBeetleB.arc','rb') as h:
stagearc.add_file_data('oarc/GetBeetleB.arc', h.read())
stagearc.set_file_data('rarc/D000_r00.arc',room0arc.to_buffer())
# write back
with get_stagepath('D000',0,rootpath=MODIFIED_ROOT_PATH).open('wb') as f:
f.write(nlzss11.compress(stagearc.to_buffer()))
# patch get item event
with open(Path(__file__).parent / EXTRACT_ROOT_PATH / 'DATA' / 'files' / 'EU' / 'Object' / 'en_GB' / '0-Common.arc', 'rb') as f:
evntarc=U8File.parse_u8(BytesIO(f.read()))
itemmsbf=parseMSB(evntarc.get_file_data('0-Common/003-ItemGet.msbf'))
evnt=itemmsbf['FLW3']['flow'][422] # event triggered after beetle text box
evnt['type']='type3'
evnt['subType']=0
evnt['param1']=0
evnt['param3']=9
evnt['param2']=75 # Hook Beetle
evntarc.set_file_data('0-Common/003-ItemGet.msbf',buildMSB(itemmsbf))
with open(Path(__file__).parent / MODIFIED_ROOT_PATH / 'DATA' / 'files' / 'EU' / 'Object' / 'en_GB' / '0-Common.arc', 'wb') as f:
f.write(evntarc.to_buffer())
def upgrade_with_patch():
# config for repacking as ISO
# patcher = Patcher(
# actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
# modified_extract_path=Path(__file__).parent / MODIFIED_ROOT_PATH,
# oarc_cache_path=Path(__file__).parent / 'oarc',
# keep_path=True,
# copy_unmodified=False) # set to true during dev to overwrite maybe bad experiments
# for use with riivolution
patcher = Patcher(
actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
modified_extract_path=Path(__file__).parent / 'temp',
oarc_cache_path=Path(__file__).parent / 'oarc',
keep_path=False,
copy_unmodified=False)
def patch_D000_r0(roomdef):
chest=next(filter(lambda x: x['name']=='TBox', roomdef['LAY ']['l0']['OBJS']))
chest['anglez']=(chest['anglez']&~0x1FF) | 53 # Beetle
return roomdef
patcher.set_room_patch('D000',0,patch_D000_r0)
def patch_item_get(itemmsbf):
evnt=itemmsbf['FLW3']['flow'][422] # event triggered after beetle text box
evnt['type']='type3'
evnt['subType']=0
evnt['param1']=0
evnt['param3']=9
evnt['param2']=75 # Hook Beetle
return itemmsbf
patcher.set_event_patch('003-ItemGet.msbf', patch_item_get)
patcher.add_stage_oarc('D000',0,['GetBeetleA','GetBeetleB'])
patcher.do_patch()
def bingo_patch():
# open all light pillars as part of the zelda event before the save prompt
# make eldin layer 1 by default, move trial to layer 0, move lava draining to layer 0
# make eldin caves layer 1 only
#
# config for repacking as ISO
patcher = Patcher(
actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
modified_extract_path=Path(__file__).parent / MODIFIED_ROOT_PATH,
oarc_cache_path=Path(__file__).parent / 'oarc',
keep_path=True,
copy_unmodified=False) # set to true during dev to overwrite maybe bad experiments
# for use with riivolution
# patcher = Patcher(
# actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
# modified_extract_path=Path(__file__).parent / 'temp',
# oarc_cache_path=Path(__file__).parent / 'oarc',
# keep_path=False,
# copy_unmodified=False)
# skyloft: move trial to layer 0
def patch_F000_r0(roomdef):
trial=next(filter(lambda x: x['name']=='WarpObj', roomdef['LAY ']['l4']['OBJ ']))
# trial_butterflies=next(filter(lambda x: x['name']=='InsctTg', roomdef['LAY ']['l4']['STAG']))
# trial['posy'] += 100
# fix object ID of trial
trial['id']=0x02F2
# trial_butterflies['id']=0xFEF3
roomdef['LAY ']['l4']['OBJ '].remove(trial)
roomdef['LAY ']['l0']['OBJ '].append(trial)
# roomdef['LAY ']['l4']['STAG'].remove(trial_butterflies)
# roomdef['LAY ']['l0']['STAG'].append(trial_butterflies)
roomdef['LAY ']['l0']['ARCN'].append('SirenEntrance')
roomdef['LAY ']['l0']['ARCN'].append('PLSwordStick')
roomdef['LAY ']['l0']['ARCN'].append('PLHarpPlay')
roomdef['LAY ']['l0']['OBJN'].append('WarpObj')
patcher.set_room_patch('F000', 0, patch_F000_r0)
patcher.add_stage_oarc('F000', 0, ('SirenEntrance','PLSwordStick','PLHarpPlay'))
# faron: force layer 1 always, add trial to layer 0
def patch_F100(stagedef):
stagedef['LYSE'] = [OrderedDict((('story_flag', -1), ('night', 0), ('layer', 1)))]
return stagedef
patcher.set_stage_patch('F100', patch_F100)
def patch_F100_r0(roomdef):
trial=next(filter(lambda x: x['name']=='WarpObj', roomdef['LAY ']['l3']['OBJ ']))
trial_butterflies=next(filter(lambda x: x['name']=='InsctTg', roomdef['LAY ']['l3']['STAG']))
# trial['posy'] += 100
# fix object ID of trial
trial['id']=0x02F2
trial_butterflies['id']=0xFEF3
roomdef['LAY ']['l3']['OBJ '].remove(trial)
roomdef['LAY ']['l0']['OBJ '].append(trial)
roomdef['LAY ']['l3']['STAG'].remove(trial_butterflies)
roomdef['LAY ']['l0']['STAG'].append(trial_butterflies)
roomdef['LAY ']['l0']['ARCN'].append('SirenEntrance')
roomdef['LAY ']['l0']['ARCN'].append('PLSwordStick')
roomdef['LAY ']['l0']['ARCN'].append('PLHarpPlay')
roomdef['LAY ']['l0']['OBJN'].append('WarpObj')
patcher.set_room_patch('F100', 0, patch_F100_r0)
patcher.add_stage_oarc('F100', 0, ('SirenEntrance','PLSwordStick','PLHarpPlay'))
# deep woods: remove layer 3+
def patch_F101(stagedef):
stagedef['LYSE']=[layer for layer in stagedef['LYSE'] if layer['layer'] < 3]
patcher.set_stage_patch('F101',patch_F101)
def fill_skyloft():
# config for repacking as ISO
patcher = Patcher(
actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
modified_extract_path=Path(__file__).parent / MODIFIED_ROOT_PATH,
oarc_cache_path=Path(__file__).parent / 'oarc',
keep_path=True,
copy_unmodified=False) # set to true during dev to overwrite maybe bad experiments
# for use with riivolution
# patcher = Patcher(
# actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
# modified_extract_path=Path(__file__).parent / 'temp',
# oarc_cache_path=Path(__file__).parent / 'oarc',
# keep_path=False,
# copy_unmodified=False)
def patch_F000_r0(roomdef):
roomdef['LAY ']['l0']['ARCN'].append('BLastBoss')
roomdef['LAY ']['l0']['ARCN'].append('PLLastBoss')
roomdef['LAY ']['l0']['OBJN'].append('BLasBos')
roomdef['LAY ']['l0']['OBJ '].append(
{
"params1": bytes.fromhex('FFFFFFC0'),
"params2": bytes.fromhex('FFFFFFFF'),
"posx": -4698.884765625,
"posy": 1237.6900634765625,
"posz": -6364.4482421875,
"anglex": 0,
"angley": 0,
"anglez": 0,
"id": 0xFDC5,
"name": "BLasBos"
})
# save_obj=next(filter(lambda x: x['name']=='saveObj', roomdef['LAY ']['l0']['OBJS']))
# 0x1C3
# for i in range(400):
# cloned = save_obj.copy()
# cloned['id'] = 0xFC00 | (0x1C5 + i)
# cloned['posy'] += (i + 1) * 200
# roomdef['LAY ']['l0']['OBJS'].append(cloned)
return roomdef
patcher.set_room_patch('F000',0,patch_F000_r0)
patcher.add_stage_oarc('F000',0,['BLastBoss','PLLastBoss'])
patcher.do_patch()
def patch_faron():
with get_stagepath('F100',0).open('rb') as f:
extracted_data=nlzss11.decompress(f.read())
stagearc=U8File.parse_u8(BytesIO(extracted_data))
# patch layers, force layer 1
stagedef=parseBzs(stagearc.get_file_data('dat/stage.bzs'))
stagedef['LYSE'] = [OrderedDict((('story_flag', -1), ('night', 0), ('layer', 1)))]
stagearc.set_file_data('dat/stage.bzs', buildBzs(stagedef))
room0arc=U8File.parse_u8(BytesIO(stagearc.get_file_data('rarc/F100_r00.arc')))
roomdef=parseBzs(room0arc.get_file_data('dat/room.bzs'))
# grab the trial from layer 3 and put in on layer 0
trial=next(filter(lambda x: x['name']=='WarpObj', roomdef['LAY ']['l3']['OBJ ']))
trial_butterflies=next(filter(lambda x: x['name']=='InsctTg', roomdef['LAY ']['l3']['STAG']))
# trial['posy'] += 100
# fix object ID of trial
trial['id']=0x02F2
trial_butterflies['id']=0xFEF3
roomdef['LAY ']['l3']['OBJ '].remove(trial)
roomdef['LAY ']['l0']['OBJ '].append(trial)
roomdef['LAY ']['l3']['STAG'].remove(trial_butterflies)
roomdef['LAY ']['l0']['STAG'].append(trial_butterflies)
roomdef['LAY ']['l0']['ARCN'].append('SirenEntrance')
roomdef['LAY ']['l0']['ARCN'].append('PLSwordStick')
roomdef['LAY ']['l0']['OBJN'].append('WarpObj')
room0arc.set_file_data('dat/room.bzs', buildBzs(roomdef))
roomdat=BytesIO()
room0arc.writeto(roomdat)
stagearc.set_file_data('rarc/F100_r00.arc', roomdat.getbuffer())
# add the trial arc(s)
with open('oarc/SirenEntrance.arc','rb') as f:
arc=f.read()
stagearc.add_file_data('oarc/SirenEntrance.arc', arc)
with open('oarc/PLHarpPlay.arc','rb') as f:
arc=f.read()
stagearc.add_file_data('oarc/PLHarpPlay.arc', arc)
with open('oarc/PLSwordStick.arc','rb') as f:
arc=f.read()
stagearc.add_file_data('oarc/PLSwordStick.arc', arc)
stagedat=BytesIO()
stagearc.writeto(stagedat)
with get_stagepath('F100',0, rootpath=MODIFIED_ROOT_PATH).open('wb') as f:
f.write(nlzss11.compress(stagedat.getbuffer()))
def demise():
patcher = Patcher(
actual_extract_path=Path(__file__).parent / EXTRACT_ROOT_PATH,
modified_extract_path=Path(__file__).parent / 'temp',
oarc_cache_path=Path(__file__).parent / 'oarc',
keep_path=False,
copy_unmodified=False)
def patch_B400_r0(roomdef):
orig_last_boss = next(filter(lambda x: x['name']=='BLasBos', roomdef['LAY ']['l1']['OBJ ']))
las_bos = orig_last_boss.copy()
las_bos['id'] = 0xFC06
las_bos['posx'] += 1000
roomdef['LAY ']['l1']['OBJ '].append(las_bos)
las_bos = orig_last_boss.copy()
las_bos['id'] = 0xFC07
las_bos['posx'] -= 1000
roomdef['LAY ']['l1']['OBJ '].append(las_bos)
return roomdef
patcher.set_room_patch('B400', 0, patch_B400_r0)
patcher.do_patch()
def extract_obj_pack():
data
{"ExonicFunc_ensGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"terms": {"ExonicFunc_refGene": ["frameshift_deletion", "frameshift_insertion", "stopgain", "stoploss"]}},
{"term": {"Func_ensGene": "splicing"}},
{"term": {"Func_refGene": "splicing"}}
],
"minimum_should_match": 1
}
},
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "AAChange_refGene"
},
"aggs" : {
"values" : {"terms" : {"field" : "AAChange_refGene.Gene", "size" : 30000}}
}
}
}
}"""
results = es.search(index=index_name, doc_type=doc_type_name,
body=compound_heterozygous_query_body_template, request_timeout=120)
return natsorted([ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']])
def get_values_from_es(es, index_name, doc_type_name, field_es_name, field_path):
if not field_path:
body_non_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"terms" : { "field" : "%s", "size" : 30000 }
}
}
}
"""
body = body_non_nested_template % (field_es_name)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
return [ele['key'] for ele in results["aggregations"]["values"]["buckets"] if ele['key']]
elif field_path:
body_nested_template = """
{
"size": 0,
"aggs" : {
"values" : {
"nested" : {
"path" : "%s"
},
"aggs" : {
"values" : {"terms" : {"field" : "%s.%s", "size" : 30000}}
}
}
}
}
"""
body = body_nested_template % (field_path,
field_path,
field_es_name)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
return [ele['key'] for ele in results["aggregations"]["values"]["values"]["buckets"] if ele['key']]
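# Usage sketch (field names are illustrative): fetch all distinct values of a
# non-nested field, or of a field under the nested "sample" path:
#   get_values_from_es(es, index_name, doc_type_name, 'CHROM', None)
#   get_values_from_es(es, index_name, doc_type_name, 'Sample_ID', 'sample')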
def get_family_dict(es, index_name, doc_type_name):
family_ids = get_values_from_es(es, index_name, doc_type_name, 'Family_ID', 'sample')
family_dict = {}
body_template = """
{
"_source": false,
"size": 1,
"query": {
"nested": {
"path": "sample",
"score_mode": "none",
"query": {
"bool": {
"must" : [{"term": { "sample.Family_ID": "%s"}},
{"exists": { "field": "sample.Father_ID"}},
{"exists": { "field": "sample.Mother_ID"}}
]
}
},
"inner_hits": {}
}
}
}
"""
family_dict = {}
for family_id in family_ids:
body = body_template % (family_id)
results = es.search(index=index_name, doc_type=doc_type_name, body=body, request_timeout=120)
result = results['hits']['hits'][0]['inner_hits']['sample']['hits']['hits'][0]["_source"]
father_id = result.get('Father_ID')
mother_id = result.get('Mother_ID')
child_id = result.get('Sample_ID')
child_sex = result.get('Sex')
family_dict[family_id] = {'father_id': father_id,
'mother_id': mother_id, 'child_id': child_id, 'child_sex': child_sex}
return family_dict
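# Sketch of the returned mapping (IDs are illustrative):
#   {"FAM01": {"father_id": "S01", "mother_id": "S02",
#              "child_id": "S03", "child_sex": "Female"}}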
def pop_sample_with_id(sample_array, sample_id):
saved_index = 0
for index, sample in enumerate(sample_array):
if sample.get('Sample_ID') == sample_id:
saved_index = index
sample = sample_array.pop(saved_index)
return sample
def pop_sample_with_id_apply_compound_het_rules(sample_array, sample_id):
saved_index = 0
for index, sample in enumerate(sample_array):
if sample.get('Sample_ID') == sample_id:
saved_index = index
sample = sample_array.pop(saved_index)
if (sample.get('Mother_Genotype') in ["0/1", "0|1", "1|0"] and
sample.get('Father_Genotype') in ["0/0", "0|0"]):
return sample
elif (sample.get('Mother_Genotype') in ["0/0", "0|0"] and
sample.get('Father_Genotype') in ["0/1", "0|1", "1|0"]):
return sample
return None
def are_variants_compound_heterozygous(variants):
compound_heterozygous_found = False
gt_pair_whose_reverse_to_find = None
compound_heterozygous_variants = []
for variant in variants:
father_gt = variant.get('Father_Genotype')
mother_gt = variant.get('Mother_Genotype')
sum_digits = sum([int(char)
for char in father_gt + mother_gt if char.isdigit()])
if sum_digits != 1:
continue
if not gt_pair_whose_reverse_to_find:
gt_pair_whose_reverse_to_find = [father_gt, mother_gt]
compound_heterozygous_variants.append(variant)
continue
current_gt_pair = [father_gt, mother_gt]
current_gt_pair.reverse()
if gt_pair_whose_reverse_to_find == current_gt_pair:
compound_heterozygous_variants.append(variant)
compound_heterozygous_found = True
if compound_heterozygous_found:
return compound_heterozygous_variants
else:
return False
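# Sketch of the pairing rule implemented above: two variants qualify when one is
# heterozygous only in the mother and the other only in the father, e.g.
#   [{'Father_Genotype': '0/0', 'Mother_Genotype': '0/1', ...},
#    {'Father_Genotype': '0/1', 'Mother_Genotype': '0/0', ...}]
# -> both variants are returned as compound heterozygous.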
def annotate_autosomal_recessive(es, index_name, doc_type_name, family_dict, annotation):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
if annotation == 'vep':
query_body = autosomal_recessive_vep_query_body_template % (child_id)
elif annotation == 'annovar':
query_body = autosomal_recessive_annovar_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'autosomal_recessive' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
to_update = False
if mendelian_diseases:
if 'autosomal_recessive' not in mendelian_diseases:
mendelian_diseases.append('autosomal_recessive')
to_update = True
else:
to_update = True
sample['mendelian_diseases'] = ['autosomal_recessive']
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} autosomal_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_denovo(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = denovo_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'denovo' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
to_update = False
if mendelian_diseases:
if 'denovo' not in mendelian_diseases:
mendelian_diseases.append('denovo')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['denovo']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} denovo samples'.format(len(list(set(sample_matched)))))
def annotate_autosomal_dominant(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = autosomal_dominant_query_body_template % (child_id)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
mendelian_diseases = sample.get('mendelian_diseases', [])
tmp_id = es_id + child_id
if 'autosomal_dominant' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_autosomal_dominant(sample):
to_update = False
if mendelian_diseases:
if 'autosomal_dominant' not in mendelian_diseases:
mendelian_diseases.append('autosomal_dominant')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['autosomal_dominant']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} autosomal dominant samples'.format(len(list(set(sample_matched)))))
range_rules = {
'hg19/GRCh37': ([60001, 2699520], [154931044, 155260560]),
'hg38/GRCh38': ([10001, 2781479], [155701383, 156030895])
}
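# (Assumed) these coordinate pairs are the X-chromosome pseudoautosomal regions
# for each genome build; the X-linked queries below take them as parameters so
# variants inside those ranges can be excluded.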
def annotate_x_linked_dominant(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = x_linked_dominant_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1])
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
# print(es_id)
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'x_linked_dominant' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_x_linked_dominant(sample):
to_update = False
if mendelian_diseases:
if 'x_linked_dominant' not in mendelian_diseases:
mendelian_diseases.append('x_linked_dominant')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['x_linked_dominant']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} x_linked_dominant samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_recessive(es, index_name, doc_type_name, family_dict, annotation):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
if annotation == 'vep':
query_body = x_linked_recessive_vep_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1]
)
elif annotation == 'annovar':
query_body = x_linked_recessive_annovar_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1]
)
# print(query_body)
query_body = json.loads(query_body)
for hit in helpers.scan(
es,
query=query_body,
scroll=u'5m',
size=1000,
preserve_order=False,
index=index_name,
doc_type=doc_type_name):
# pprint.pprint(hit["_source"])
es_id = hit['_id']
sample_array = hit["_source"]["sample"]
sample = pop_sample_with_id(sample_array, child_id)
tmp_id = es_id + child_id
mendelian_diseases = sample.get('mendelian_diseases', [])
if 'x_linked_recessive' in mendelian_diseases:
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
continue
if is_x_linked_recessive(sample):
# sample['mendelian_diseases'] = 'x_linked_recessive'
to_update = False
if mendelian_diseases:
if 'x_linked_recessive' not in mendelian_diseases:
mendelian_diseases.append('x_linked_recessive')
print(es_id, mendelian_diseases)
to_update = True
else:
sample['mendelian_diseases'] = ['x_linked_recessive']
to_update = True
if tmp_id not in sample_matched:
sample_matched.append(tmp_id)
# if to_update:
# es.update(index=index_name, doc_type=doc_type_name, id=es_id,
# body={"doc": {"sample": sample_array}})
if to_update:
sample_array.append(sample)
action = {
"_index": index_name,
'_op_type': 'update',
"_type": doc_type_name,
"_id": es_id,
"doc": {
"sample": sample_array
}
}
count += 1
actions.append(action)
if count % 500 == 0:
helpers.bulk(es, actions, refresh=True)
actions = []
helpers.bulk(es, actions, refresh=True)
es.indices.refresh(index_name)
es.cluster.health(wait_for_no_relocating_shards=True)
print('Found {} x_linked_recessive samples'.format(len(list(set(sample_matched)))))
def annotate_x_linked_denovo(es, index_name, doc_type_name, family_dict):
sample_matched = []
for family_id, family in family_dict.items():
count = 0
actions = []
child_id = family.get('child_id')
# print(child_id)
query_body = x_linked_de_novo_query_body_template % (
child_id,
range_rules['hg19/GRCh37'][0][0],
range_rules['hg19/GRCh37'][0][1],
range_rules['hg19/GRCh37'][1][0],
range_rules['hg19/GRCh37'][1][1])
# print(query_body)
query_body
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
tfrealdatatype=tf.float32 # to use double precision, switch the types above to complex128/float64
#%% number of training points
# ntrain=100 # training set
# nvalid=50 # validation set
#%% epochs
epochs=100 # maximal number of epochs
display_steps=2 # number of epochs between validations
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% generate the tf tensor for the input gate
#XT=tf.constant(X_np,dtype=tfdatatype)
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
dataU_np=quantumgates.randomU(M,npdatatype)
U=tf.constant(dataU_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random_uniform([M,M],dtype=tfrealdatatype),tf.random_uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extract the first N rows from a vector M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.real(equation)
eqimag=tf.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
# messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>1:
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=X_np.shape[0]
out['M']=M
out['X']=X_np
return out, Wfinal, Tfinal, Tinitial
#%%%%
def traincomplex(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
ntrain=100,
nvalid=50):
# Given a gate of size N and a complex system described by an MxM transfer matrix U_np,
# use an NN to train an input gate to act as the input unitary class
#
# The input gate is only a phase gate, described by a diagonal matrix
# with diagonal exp(i phi1), exp(i phi2), ..., exp(i phin)
#
# with phi1, phi2, ..., phin are trainable
#
# TODO: add batch training (currently it trains without batching)
#
# Date: 5 April 2019, by Claudio
#
# Input:
# X_np, gate as numpy matrix
# U_np, complex system unitary matrix (not checked if unitary) a numpy matrix
# verbose, 0 no output, 1 minimal, 2 all
#%% various imports here
###### TO BE FINISHED !!!!!!!!!
from utilitiesquantumgates import quantumgates
from utilitiesquantumgates import utilities
from tensorboardutilities import tensorboardutilities
from datetime import datetime
import time
#%% datatypes
npdatatype=np.complex64
tfdatatype=tf.complex64
tfrealdatatype=tf.float32 # to use double precision, switch the above to complex128
#%% number of training points
# ntrain=100 # training set
# nvalid=50 # validation set
#%% epochs
epochs=100 # maximal number of epochs
display_steps=2 # number of steps between validations
#%% learning rate
learning_rate=0.01
#%% threshold for stopping iterations in validation cost
threshold_valid=inputaccuracy
#%% set the tensorboard utilities
tensorboarddir = tensorboardutilities.getdirname();
#%% random seed
timestamp = int(time.mktime(datetime.now().timetuple()))
RANDOM_SEED=timestamp
if verbose>1:
print('Random seed = ' + repr(timestamp))
#%% define graph
tf.reset_default_graph()
#%% summaries for tensorflow
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.scalar('norm', tf.norm(var))
tf.summary.histogram('histogram', var)
#%% seed random number generation
tf.set_random_seed(RANDOM_SEED)
np.random.seed(seed=RANDOM_SEED)
#%% generate the tf tensor for the input gate
#XT=tf.constant(X_np,dtype=tfdatatype)
#Extract N and M in input
N=X_np.shape[0]
M=U_np.shape[0]
#%% unitary rigging of X
RXT_np=quantumgates.riggunitary(X_np,M)
RXT=tf.constant(RXT_np,dtype=tfdatatype)
#%% random unitary matrix
U=tf.constant(U_np,dtype=tfdatatype)
#%% generate the training matrix
W0=tf.random_uniform([M,M],dtype=tfrealdatatype)
WC=tf.complex(tf.random_uniform([M,M],dtype=tfrealdatatype),tf.random_uniform([M,M],dtype=tfrealdatatype))
Wreal=tf.get_variable("Wr",initializer=W0,dtype=tfrealdatatype)
Wimag=tf.get_variable("Wi",initializer=W0,dtype=tfrealdatatype)
W=tf.get_variable("W",initializer=WC,dtype=tfdatatype,trainable=False)
#%% transfer matrix
transfer_matrix=tf.get_variable("transfer_matrix",initializer=WC,trainable=False)
#%% place holder
x=tf.placeholder(dtype=tfdatatype,shape=(M,1),name="x")
#%% generate training set
xtrains=np.zeros((M,ntrain),dtype=npdatatype)
for j in range(ntrain):
for i in range(M):
xtrains[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize training set
xtrains=tf.keras.utils.normalize(xtrains,axis=0,order=2)
#%% generate validation set
xvalids=np.zeros((M,nvalid),dtype=npdatatype)
for j in range(nvalid):
for i in range(M):
xvalids[i,j]=np.random.random_sample()+1j*np.random.random_sample()
#%% normalize validation set
xvalids=tf.keras.utils.normalize(xvalids,axis=0,order=2)
#%% projector that extract the first N rows from a vector M
#project=tf.constant(quantumgates.projector(N,M,npdatatype),dtype=tfdatatype)
#%% equation
with tf.name_scope("equation") as scope:
with tf.name_scope("Wreal") as scope:
variable_summaries(Wreal)
with tf.name_scope("Wimag") as scope:
variable_summaries(Wimag)
yt=tf.matmul(RXT,x)
W=tf.complex(Wreal,Wimag)
transfer_matrix=tf.matmul(U,W)
equation=tf.matmul(transfer_matrix,x)-yt
eqreal=tf.real(equation)
eqimag=tf.imag(equation)
cost_function=tf.reduce_mean(tf.square(eqreal)+
tf.square(eqimag))
tf.summary.scalar('cost_function',cost_function)
#%%TO DO : TRY OTHER MINIMIZER
with tf.name_scope("train") as scope:
# global_step=tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
# optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
# optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
# cost_function, global_step=global_step)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
cost_function)
#%% message
if verbose>0:
print('Running with M ' + repr(M) +
' ntrain ' + repr(ntrain) +
' nvalid ' + repr(nvalid))
#%% writer
train_writer=tf.summary.FileWriter(tensorboarddir)
merged=tf.summary.merge_all()
#%%
xtmp=np.zeros((M,1),dtype=npdatatype)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
train_writer.add_graph(sess.graph)
Tinitial=transfer_matrix.eval()
for epoch in range(epochs):
avg_cost=0.
for i in range(ntrain):
xtmp=np.reshape(xtrains[0:M,i],(M,1))
sess.run(optimizer,feed_dict={x: xtmp})
avg_cost+=sess.run(cost_function, feed_dict={x: xtmp})
summary=sess.run(merged, feed_dict={x: xtmp})
train_writer.add_summary(summary,i+epoch*epochs)
avg_cost=avg_cost/ntrain
# messages
if epoch % display_steps == 0:
# evaluate the validation error
avg_cost_valid=0.
for i in range(nvalid):
xtmp_valid=np.reshape(xvalids[0:M,i],(M,1))
avg_cost_valid+=sess.run(cost_function, feed_dict=
{x: xtmp_valid})
avg_cost_valid=avg_cost_valid/nvalid
if verbose>1:
print('epoch '+repr(epoch))
print('cost '+repr(avg_cost))
print('valid cost '+repr(avg_cost_valid))
# check the validation cost and if needed exit the iteration
if avg_cost_valid < threshold_valid:
if verbose:
print('Convergence in validation reached at epoch '
+ repr(epoch))
break
if epoch>=epochs-1:
if verbose>0:
print('No convergence, maximal epochs reached '
+repr(epochs))
Tfinal=transfer_matrix.eval()
Wfinal=W.eval()
TVV=tf.matmul(W,W,adjoint_a=True).eval()
# print('Determinant Structure matrix ' + repr(np.linalg.det(dataU_np)))
#%%
if verbose>1:
print("Final Sinput=W")
utilities.printonscreennp(Wfinal)
print("Final TV V for unitarity ")
utilities.printonscreennp(TVV)
print("Initial T")
utilities.printonscreennp(Tinitial)
print("Final T")
utilities.printonscreennp(Tfinal)
#%%
sess.close()
#%% set the output dictionary of parameters
out=dict();
out['accuracy']=threshold_valid
out['epoch']=epoch
out['ntrain']=ntrain
out['nvalid']=nvalid
out['N']=N
out['M']=M
out['X']=X_np
out['U']=U_np
return out, Wfinal, Tfinal, Tinitial
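#%% hedged usage sketch (not part of the original file)
# Minimal example of how traincomplex above could be invoked: a 2x2 NOT-like
# gate rigged into a random 4x4 unitary built via a QR decomposition. The
# example values (sizes, accuracy, training/validation set sizes) are
# illustrative assumptions, not taken from this module.
def _example_traincomplex():
    rng = np.random.RandomState(0)
    A = rng.randn(4, 4) + 1j * rng.randn(4, 4)
    Q, _ = np.linalg.qr(A)                      # Q is unitary (up to phases)
    X_example = np.array([[0.0, 1.0],
                          [1.0, 0.0]], dtype=np.complex64)
    out, Wfinal, Tfinal, Tinitial = traincomplex(
        X_example, Q.astype(np.complex64),
        verbose=1, inputaccuracy=1e-3, ntrain=20, nvalid=10)
    return out, Wfinal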
#%% class for training SLM with single input
class SLM:
def trainSLMsingleinputquantized(X_np,U_np,
verbose=2,
inputaccuracy=1e-4,
epochs=10,display_steps=100,
realMIN=-1.0, realMAX=1.0,
imagMIN=0.0, imagMAX=0.0,
quantizedbits=8):
# Given a gate of size N, generate a random unitary matrix and
# use an NN to train an input gate to act as the input unitary class
#
# Input:
# X_np, gate as numpy matrix
# U_np, complex system unitary matrix (not checked if unitary), a numpy matrix of size MxM
# verbose, 0 no output, 1 minimal, 2 steps, 3 all
#
# Use single input SLM
#
# realMIN, realMAX, minimal and maximal values for Wreal
#
# imagMIN, imagMAX, minimal and maximal values for Wimag (if both are 0 the weight is real)
#
# quantized bits
#%% various imports here
###### TO BE FINISHED !!!!!!!!!
from utilitiesquantumgates import quantumgates
# Repository: meGregV/blpapi-python
# service.py
"""A service which provides access to API data (provide or consume).
All API data is associated with a 'Service'. A service object is obtained
from a Session and contains zero or more 'Operations'. A service can be a
provider service (can generate API data) or a consumer service.
"""
from .event import Event
from .name import getNamePair
from .request import Request
from .schema import SchemaElementDefinition
from .exception import _ExceptionUtil
from . import utils
from . import internals
class Operation(object):
"""Defines an operation which can be performed by a Service.
Operation objects are obtained from a Service object. They provide
read-only access to the schema of the Operations Request and the schema of
the possible response.
"""
def __init__(self, handle, sessions):
self.__handle = handle
self.__sessions = sessions
def name(self):
"""Return the name of this Operation."""
return internals.blpapi_Operation_name(self.__handle)
def description(self):
"""Return a human readable description of this Operation."""
return internals.blpapi_Operation_description(self.__handle)
def requestDefinition(self):
"""Return a SchemaElementDefinition for this Operation.
Return a SchemaElementDefinition which defines the schema for this
Operation.
"""
errCode, definition = internals.blpapi_Operation_requestDefinition(
self.__handle)
return None if 0 != errCode else\
SchemaElementDefinition(definition, self.__sessions)
def numResponseDefinitions(self):
"""Return the number of the response types for this Operation.
Return the number of the response types that can be returned by this
Operation.
"""
return internals.blpapi_Operation_numResponseDefinitions(self.__handle)
def getResponseDefinitionAt(self, position):
"""Return a SchemaElementDefinition for the response to this Operation.
Return a SchemaElementDefinition which defines the schema for the
response that this Operation delivers.
If 'position' >= numResponseDefinitions() an exception is raised.
"""
errCode, definition = internals.blpapi_Operation_responseDefinition(
self.__handle,
position)
_ExceptionUtil.raiseOnError(errCode)
return SchemaElementDefinition(definition, self.__sessions)
def responseDefinitions(self):
"""Return an iterator over response for this Operation.
Return an iterator over response types that can be returned by this
Operation.
Response type is defined by SchemaElementDefinition object.
"""
return utils.Iterator(self,
Operation.numResponseDefinitions,
Operation.getResponseDefinitionAt)
def _sessions(self):
"""Return session(s) this object is related to. For internal use."""
return self.__sessions
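# Hedged example (not part of the BLPAPI distribution): given an Operation
# obtained from a Service, its request schema and the possible response
# schemas can be inspected as sketched below; `operation` is a placeholder
# for an Operation object supplied by the caller.
def _print_operation_schema(operation):
    print(operation.name(), "-", operation.description())
    print("request schema:", operation.requestDefinition())
    for responseDefinition in operation.responseDefinitions():
        # each item is a SchemaElementDefinition describing one response type
        print("response schema:", responseDefinition)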
class Service(object):
"""Defines a service which provides access to API data.
A Service object is obtained from a Session and contains the Operations
(each of which contains its own schema) and the schema for Events which
this Service may produce. A Service object is also used to create Request
objects used with a Session to issue requests.
Provider services are created to generate API data and must be registered
before use.
The Service object is a handle to the underlying data which is owned by the
Session. Once a Service has been successfully opened in a Session it remains
accessible until the Session is terminated.
"""
def __init__(self, handle, sessions):
self.__handle = handle
self.__sessions = sessions
internals.blpapi_Service_addRef(self.__handle)
def __del__(self):
try:
self.destroy()
except (NameError, AttributeError):
pass
def destroy(self):
if self.__handle:
internals.blpapi_Service_release(self.__handle)
self.__handle = None
def __str__(self):
"""Convert the service schema to a string."""
return self.toString()
def toString(self, level=0, spacesPerLevel=4):
"""Convert this Service schema to a string.
Convert this Service schema to a string at (absolute value specified
for) the optionally specified indentation 'level'. If 'level' is
specified, optionally specify 'spacesPerLevel', the number of spaces
per indentation level for this and all of its nested objects. If
'level' is negative, suppress indentation of the first line. If
'spacesPerLevel' is negative, format the entire output on one line,
suppressing all but the initial indentation (as governed by 'level').
"""
return internals.blpapi_Service_printHelper(self.__handle,
level,
spacesPerLevel)
def createPublishEvent(self):
"""Create an Event suitable for publishing to this Service.
Use an EventFormatter to add Messages to the Event and set fields.
"""
errCode, event = internals.blpapi_Service_createPublishEvent(
self.__handle)
_ExceptionUtil.raiseOnError(errCode)
return Event(event, self.__sessions)
def createAdminEvent(self):
"""Create an Admin Event suitable for publishing to this Service.
Use an EventFormatter to add Messages to the Event and set fields.
"""
errCode, event = internals.blpapi_Service_createAdminEvent(
self.__handle)
_ExceptionUtil.raiseOnError(errCode)
return Event(event, self.__sessions)
def createResponseEvent(self, correlationId):
"""Create a response Event to answer the request.
Use an EventFormatter to add a Message to the Event and set fields.
"""
errCode, event = internals.blpapi_Service_createResponseEvent(
self.__handle,
correlationId._handle())
_ExceptionUtil.raiseOnError(errCode)
return Event(event, self.__sessions)
def name(self):
"""Return the name of this service."""
return internals.blpapi_Service_name(self.__handle)
def description(self):
"""Return a human-readable description of this service."""
return internals.blpapi_Service_description(self.__handle)
def hasOperation(self, name):
"""Return True if the specified 'name' is a valid Operation.
Return True if the specified 'name' identifies a valid Operation in
this Service.
"""
names = getNamePair(name)
return internals.blpapi_Service_hasOperation(self.__handle,
names[0],
names[1])
def getOperation(self, nameOrIndex):
"""Return a specified operation.
Return an 'Operation' object identified by the specified
'nameOrIndex', which must be either a string, a Name, or an integer.
If 'nameOrIndex' is a string or a Name and 'hasOperation(nameOrIndex)
!= True', or if 'nameOrIndex' is an integer and 'nameOrIndex >=
numOperations()', then an exception is raised.
"""
if not isinstance(nameOrIndex, int):
names = getNamePair(nameOrIndex)
errCode, operation = internals.blpapi_Service_getOperation(
self.__handle, names[0], names[1])
_ExceptionUtil.raiseOnError(errCode)
return Operation(operation, self.__sessions)
errCode, operation = internals.blpapi_Service_getOperationAt(
self.__handle,
nameOrIndex)
_ExceptionUtil.raiseOnError(errCode)
return Operation(operation, self.__sessions)
def numOperations(self):
"""Return the number of Operations defined by this Service."""
return internals.blpapi_Service_numOperations(self.__handle)
def operations(self):
"""Return an iterator over Operations defined by this Service"""
return utils.Iterator(self,
Service.numOperations,
Service.getOperation)
def hasEventDefinition(self, name):
"""Return True if the specified 'name' identifies a valid event.
Return True if the specified 'name' identifies a valid event in this
Service, False otherwise.
Exception is raised if 'name' is neither a Name nor a string.
"""
names = getNamePair(name)
return internals.blpapi_Service_hasEventDefinition(self.__handle,
names[0],
names[1])
def getEventDefinition(self, nameOrIndex):
"""Return the definition of a specified event.
Return a 'SchemaElementDefinition' object describing the element
identified by the specified 'nameOrIndex', which must be either a
string or an integer. If 'nameOrIndex' is a string and
'hasEventDefinition(nameOrIndex) != True', then a 'NotFoundException'
is raised; if 'nameOrIndex' is an integer and 'nameOrIndex >=
numEventDefinitions()' then an 'IndexOutOfRangeException' is raised.
"""
if not isinstance(nameOrIndex, int):
names = getNamePair(nameOrIndex)
errCode, definition = internals.blpapi_Service_getEventDefinition(
self.__handle,
names[0],
names[1])
_ExceptionUtil.raiseOnError(errCode)
return SchemaElementDefinition(definition, self.__sessions)
errCode, definition = internals.blpapi_Service_getEventDefinitionAt(
self.__handle,
nameOrIndex)
_ExceptionUtil.raiseOnError(errCode)
return SchemaElementDefinition(definition, self.__sessions)
def numEventDefinitions(self):
"""Return the number of unsolicited events defined by this Service."""
return internals.blpapi_Service_numEventDefinitions(self.__handle)
def eventDefinitions(self):
"""Return an iterator over unsolicited events defined by this Service.
"""
return utils.Iterator(self,
Service.numEventDefinitions,
Service.getEventDefinition)
def authorizationServiceName(self):
"""Return the authorization service name.
Return the name of the Service which must be used in order to authorize
access to restricted operations on this Service. If no authorization is
required to access operations on this service an empty string is
returned. Authorization services never require authorization to use.
"""
return internals.blpapi_Service_authorizationServiceName(self.__handle)
def createRequest(self, operation):
"""Return a empty Request object for the specified 'operation'.
If 'operation' does not identify a valid operation in the Service then
an exception is raised.
An application must populate the Request before issuing it using
Session.sendRequest().
"""
errCode, request = internals.blpapi_Service_createRequest(
self.__handle,
operation)
_ExceptionUtil.raiseOnError(errCode)
return Request(request, self.__sessions)
def createAuthorizationRequest(self, authorizationOperation=None):
"""Return an empty Request object for 'authorizationOperation'.
If the 'authorizationOperation' does not identify a valid operation for
this Service then an exception is raised.
An application must populate the Request before issuing it using
Session.sendAuthorizationRequest().
"""
errCode, request = internals.blpapi_Service_createAuthorizationRequest(
self.__handle,
authorizationOperation)
_ExceptionUtil.raiseOnError(errCode)
return Request(request, self.__sessions)
def _handle(self):
"""Return the internal implementation."""
return self.__handle
def _sessions(self):
"""Return session(s) this object is related to. For internal use."""
return self.__sessions
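# Hedged usage sketch (not part of this module): a Service is obtained from an
# already started Session and then used to build Requests. The service name
# '//blp/refdata', the operation name 'ReferenceDataRequest', and the Session
# methods openService/getService follow the standard BLPAPI examples and are
# assumptions here rather than something defined in this file.
def _example_create_request(session):
    if not session.openService("//blp/refdata"):
        raise RuntimeError("failed to open //blp/refdata")
    service = session.getService("//blp/refdata")   # returns a Service
    for operation in service.operations():          # inspect available operations
        print(operation.name())
    request = service.createRequest("ReferenceDataRequest")
    # populate the request, then submit it with session.sendRequest(request)
    return request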
__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: The above
copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
True:
return (
65739, # {U_io_bind1}
None,
None,
None,
_idris_Python_46_Prim_46_next(None, e8),
(65753, e10, e9, e8) # {U_{Python.Prim.iterate:iter:0_lam5}1}
)
# Prelude.List.reverse, reverse'
def _idris_Prelude_46_List_46_reverse_58_reverse_39__58_0(e0, e1, e2):
while True:
if e2: # Prelude.List.::
in0, in1 = e2.head, e2.tail
e0, e1, e2, = None, e1.cons(in0), in1,
continue
return _idris_error("unreachable due to tail call")
else: # Prelude.List.Nil
return e1
return _idris_error("unreachable due to case in tail position")
# Decidable.Equality.Decidable.Equality.Char implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Char_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Int implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Int_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Integer implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Integer_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.ManagedPtr implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_ManagedPtr_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Ptr implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Ptr_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.String implementation of Decidable.Equality.DecEq, method decEq, primitiveNotEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_String_58__33_decEq_58_0_58_primitiveNotEq_58_0():
while True:
return None
# Decidable.Equality.Decidable.Equality.Bool implementation of Decidable.Equality.DecEq, method decEq
def _idris_Decidable_46_Equality_46_Decidable_46_Equality_46__64_Decidable_46_Equality_46_DecEq_36_Bool_58__33_decEq_58_0(
e0, e1
):
while True:
if not e1: # Prelude.Bool.False
if not e0: # Prelude.Bool.False
return (0,) # Prelude.Basics.Yes
else: # Prelude.Bool.True
return (1,) # Prelude.Basics.No
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
if not e0: # Prelude.Bool.False
return (1,) # Prelude.Basics.No
else: # Prelude.Bool.True
return (0,) # Prelude.Basics.Yes
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
# Prelude.Interfaces.Prelude.Nat.Nat implementation of Prelude.Interfaces.Eq, method ==
def _idris_Prelude_46_Interfaces_46_Prelude_46_Nat_46__64_Prelude_46_Interfaces_46_Eq_36_Nat_58__33__61__61__58_0(
e0, e1
):
while True:
if e1 == 0:
if e0 == 0:
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
elif True:
in0 = (e1 - 1)
if e0 == 0:
return False
else:
in1 = (e0 - 1)
e0, e1, = in1, in0,
continue
return _idris_error("unreachable due to tail call")
return _idris_error("unreachable due to case in tail position")
else:
return False
return _idris_error("unreachable due to case in tail position")
# Prelude.Interfaces.Prelude.Show.Prec implementation of Prelude.Interfaces.Eq, method ==
def _idris_Prelude_46_Interfaces_46_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Eq_36_Prec_58__33__61__61__58_0(
e0, e1
):
while True:
if e1[0] == 4: # Prelude.Show.User
in0 = e1[1]
if e0[0] == 4: # Prelude.Show.User
in1 = e0[1]
return _idris_Prelude_46_Interfaces_46_Prelude_46_Nat_46__64_Prelude_46_Interfaces_46_Eq_36_Nat_58__33__61__61__58_0(
in1, in0
)
else:
aux1 = (_idris_Prelude_46_Show_46_precCon(e0) == _idris_Prelude_46_Show_46_precCon(e1))
if aux1 == 0:
return False
else:
return True
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
else:
aux2 = (_idris_Prelude_46_Show_46_precCon(e0) == _idris_Prelude_46_Show_46_precCon(e1))
if aux2 == 0:
return False
else:
return True
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
# Prelude.Foldable.Prelude.List.List implementation of Prelude.Foldable.Foldable, method foldr
def _idris_Prelude_46_Foldable_46_Prelude_46_List_46__64_Prelude_46_Foldable_46_Foldable_36_List_58__33_foldr_58_0(
e0, e1, e2, e3, e4
):
while True:
if e4: # Prelude.List.::
in0, in1 = e4.head, e4.tail
return APPLY0(
APPLY0(e2, in0),
_idris_Prelude_46_Foldable_46_Prelude_46_List_46__64_Prelude_46_Foldable_46_Foldable_36_List_58__33_foldr_58_0(
None, None, e2, e3, in1
)
)
else: # Prelude.List.Nil
return e3
return _idris_error("unreachable due to case in tail position")
# Prelude.Functor.Prelude.Monad.IO' ffi implementation of Prelude.Functor.Functor, method map
def _idris_Prelude_46_Functor_46_Prelude_46_Monad_46__64_Prelude_46_Functor_46_Functor_36_IO_39__32_ffi_58__33_map_58_0(
e0, e1, e2, e3, e4
):
while True:
return (65739, None, None, None, e4, (65703, e3)) # {U_io_bind1}, {U_Prelude.Functor.{[email protected]$IO' ffi:!map:0_lam0}1}
# Prelude.Interfaces.Prelude.Interfaces.Integer implementation of Prelude.Interfaces.Ord, method compare
def _idris_Prelude_46_Interfaces_46_Prelude_46_Interfaces_46__64_Prelude_46_Interfaces_46_Ord_36_Integer_58__33_compare_58_0(
e0, e1
):
while True:
aux2 = (e0 == e1)
if aux2 == 0:
aux3 = False
else:
aux3 = True
aux1 = aux3
if not aux1: # Prelude.Bool.False
aux5 = (e0 < e1)
if aux5 == 0:
aux6 = False
else:
aux6 = True
aux4 = aux6
if not aux4: # Prelude.Bool.False
return (2,) # Prelude.Interfaces.GT
else: # Prelude.Bool.True
return (0,) # Prelude.Interfaces.LT
return _idris_error("unreachable due to case in tail position")
else: # Prelude.Bool.True
return (1,) # Prelude.Interfaces.EQ
return _idris_error("unreachable due to case in tail position")
# Prelude.Interfaces.Prelude.Nat.Nat implementation of Prelude.Interfaces.Ord, method compare
def _idris_Prelude_46_Interfaces_46_Prelude_46_Nat_46__64_Prelude_46_Interfaces_46_Ord_36_Nat_58__33_compare_58_0(
e0, e1
):
while True:
if e1 == 0:
if e0 == 0:
return (1,) # Prelude.Interfaces.EQ
else:
in0 = (e0 - 1)
return (2,) # Prelude.Interfaces.GT
return _idris_error("unreachable due to case in tail position")
else:
in1 = (e1 - 1)
if e0 == 0:
return (0,) # Prelude.Interfaces.LT
else:
in2 = (e0 - 1)
e0, e1, = in2, in1,
continue
return _idris_error("unreachable due to tail call")
return _idris_error("unreachable due to case in tail position")
return _idris_error("unreachable due to case in tail position")
# Prelude.Interfaces.Prelude.Show.Prec implementation of Prelude.Interfaces.Ord, method >=
def _idris_Prelude_46_Interfaces_46_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Ord_36_Prec_58__33__62__61__58_0(
e0, e1
):
while True:
aux2 = _idris_Prelude_46_Interfaces_46_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Ord_36_Prec_58__33_compare_58_0(
e0, e1
)
if aux2[0] == 2: # Prelude.Interfaces.GT
aux3 = True
else:
aux3 = False
aux1 = aux3
if not aux1: # Prelude.Bool.False
return _idris_Prelude_46_Interfaces_46__123_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Ord_36_Prec_58__33__62__61__58_0_95_lam0_125_(
e0, e1
)
else: # Prelude.Bool.True
return True
return _idris_error("unreachable due to case in tail position")
# Prelude.Interfaces.Prelude.Show.Prec implementation of Prelude.Interfaces.Ord, method compare
def _idris_Prelude_46_Interfaces_46_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Ord_36_Prec_58__33_compare_58_0(
e0, e1
):
while True:
if e1[0] == 4: # Prelude.Show.User
in0 = e1[1]
if e0[0] == 4: # Prelude.Show.User
in1 = e0[1]
return _idris_Prelude_46_Interfaces_46_Prelude_46_Nat_46__64_Prelude_46_Interfaces_46_Ord_36_Nat_58__33_compare_58_0(
in1, in0
)
else:
return _idris_Prelude_46_Interfaces_46_Prelude_46_Interfaces_46__64_Prelude_46_Interfaces_46_Ord_36_Integer_58__33_compare_58_0(
_idris_Prelude_46_Show_46_precCon(e0),
_idris_Prelude_46_Show_46_precCon(e1)
)
return _idris_error("unreachable due to case in tail position")
else:
return _idris_Prelude_46_Interfaces_46_Prelude_46_Interfaces_46__64_Prelude_46_Interfaces_46_Ord_36_Integer_58__33_compare_58_0(
_idris_Prelude_46_Show_46_precCon(e0),
_idris_Prelude_46_Show_46_precCon(e1)
)
return _idris_error("unreachable due to case in tail position")
# Prelude.Show.Prelude.Show.Nat implementation of Prelude.Show.Show, method show
def _idris_Prelude_46_Show_46_Prelude_46_Show_46__64_Prelude_46_Show_46_Show_36_Nat_58__33_show_58_0(
e0
):
while True:
return _idris_Prelude_46_Show_46_primNumShow(None, (65741,), (0,), e0) # {U_prim__toStrBigInt1}, Prelude.Show.Open
# with block in Prelude.Strings.strM
def _idris__95_Prelude_46_Strings_46_strM_95_with_95_22(e0, e1):
while True:
if e1[0] == 1: # Prelude.Basics.No
return (0,) # Prelude.Strings.StrNil
else: # Prelude.Basics.Yes
return (1, e0[0]) # Prelude.Strings.StrCons
return _idris_error("unreachable due to case in tail position")
# with block in Prelude.Interfaces.Prelude.Show.Prec implementation of Prelude.Interfaces.Ord, method >
def _idris__95_Prelude_46_Interfaces_46_Prelude_46_Show_46__64_Prelude_46_Interfaces_46_Ord_36_Prec_58__33__62__58_0_95_with_95_27(
e0, e1, e2
):
while True:
if e0[0] == 2: # Prelude.Interfaces.GT
return True
else:
return False
return _idris_error("unreachable due to case in tail position")
# with block in Prelude.Show.firstCharIs
def _idris__95_Prelude_46_Show_46_firstCharIs_95_with_95_44(e0, e1, e2):
while True:
if e2[0] == 1: # Prelude.Strings.StrCons
in0 = e2[1]
return APPLY0(e0, in0)
else: # Prelude.Strings.StrNil
return False
return _idris_error("unreachable due to case in tail position")
# constructor of Prelude.Algebra.Monoid#Semigroup ty
def _idris_Prelude_46_Algebra_46_Monoid_95_ictor_35__34_Semigroup_32_ty_34_(e0, e1):
while True:
assert e1[0] == 0 # constructor of Prelude.Algebra.Monoid
in0, in1 = e1[1:]
return in0
return _idris_error("unreachable due to case in tail position")
# Python.Exceptions.case block in fromString at ./Python/Exceptions.idr:56:21
def _idris_Python_46_Exceptions_46_fromString_95__95__95__95__95_Python_95__95_Exceptions_95__95_idr_95_56_95_21_95_case(
e0, e1
):
while True:
return {
u'ArithmeticError': (3,), # Python.Exceptions.ArithmeticError
u'AssertionError': (7,), # Python.Exceptions.AssertionError
u'AttributeError': (8,), # Python.Exceptions.AttributeError
u'BufferError': (2,), # Python.Exceptions.BufferError
u'EOFError': (14,), # Python.Exceptions.EOFError
u'EnvironmentError': (9,), # Python.Exceptions.EnvironmentError
u'FloatingPointError': (4,), # Python.Exceptions.FloatingPointError
u'IOError': (10,), # Python.Exceptions.IOError
u'ImportError': (15,), # Python.Exceptions.ImportError
u'IndentationError': (26,), # Python.Exceptions.IndentationError
u'IndexError': (17,), # Python.Exceptions.IndexError
u'KeyError': (18,), # Python.Exceptions.KeyError
u'LookupError': (16,), # Python.Exceptions.LookupError
u'MemoryError': (19,), # Python.Exceptions.MemoryError
u'NameError': (20,), # Python.Exceptions.NameError
u'NotImplementedError': (24,), # Python.Exceptions.NotImplementedError
u'OSError': (11,), # Python.Exceptions.OSError
u'OverflowError': (5,), # Python.Exceptions.OverflowError
u'ReferenceError': (22,), # Python.Exceptions.ReferenceError
u'RuntimeError': (23,), # Python.Exceptions.RuntimeError
u'StandardError': (1,), # Python.Exceptions.StandardError
u'StopIteration': (0,), # Python.Exceptions.StopIteration
u'SyntaxError': (25,), # Python.Exceptions.SyntaxError
u'SystemError': (28,), # Python.Exceptions.SystemError
u'TabError': (27,), # Python.Exceptions.TabError
u'TypeError': (29,), # Python.Exceptions.TypeError
u'UnboundLocalError': (21,), # Python.Exceptions.UnboundLocalError
u'UnicodeDecodeError': (32,), # Python.Exceptions.UnicodeDecodeError
u'UnicodeEncodeError': (33,), # Python.Exceptions.UnicodeEncodeError
u'UnicodeError': (31,), # Python.Exceptions.UnicodeError
u'UnicodeTranslateError': (34,), # Python.Exceptions.UnicodeTranslateError
u'VMSError': (13,), # Python.Exceptions.VMSError
u'ValueError': (30,), # Python.Exceptions.ValueError
u'WindowsError': (12,), # Python.Exceptions.WindowsError
u'ZeroDivisionError': (6,) # Python.Exceptions.ZeroDivisionError
}.get(e0, (35,)) # Python.Exceptions.Other
# Python.Exceptions.case block in try at ./Python/Exceptions.idr:106:16
def _idris_Python_46_Exceptions_46_try_95__95__95__95__95_Python_95__95_Exceptions_95__95_idr_95_106_95_16_95_case(
e0, e1, e2, e3, e4
):
while True:
if e3[0] == 0: # Prelude.Either.Left
in0 = e3[1]
return (
65740, # {U_io_pure1}
None,
None,
(
1, # Python.Exceptions.Except
_idris_Python_46_Exceptions_46_fromString(
_idris_Python_46_Fields_46__47__46_(
None,
None,
_idris_Python_46_Fields_46__47__46_(None, None, in0, u'__class__', None),
u'__name__',
None
)
),
in0
)
)
else: # Prelude.Either.Right
in1 = e3[1]
return (65740, None, None, (0, in1)) # {U_io_pure1}, Python.Exceptions.OK
return _idris_error("unreachable due to case in tail position")
# Python.Exceptions.case block in case block in try at ./Python/Exceptions.idr:106:16 at ./Python/Exceptions.idr:118:10
def _idris_Python_46_Exceptions_46_try_95__95__95__95__95_Python_95__95_Exceptions_95__95_idr_95_106_95_16_95_case_95__95__95__95__95_Python_95__95_Exceptions_95__95_idr_95_118_95_10_95_case(
e0, e1, e2, e3, e4, e5
):
while True:
if e2[0] == 0: # Prelude.Either.Left
in0 = e2[1]
return (
65740, # {U_io_pure1}
None,
None,
(
1, # Python.Exceptions.Except
_idris_Python_46_Exceptions_46_fromString(
_idris_Python_46_Fields_46__47__46_(
None,
None,
_idris_Python_46_Fields_46__47__46_(None, None, in0, u'__class__', None),
u'__name__',
None
)
),
in0
)
)
else: # Prelude.Either.Right
in1 = e2[1]
return (65740, None, None, (0, in1)) # {U_io_pure1}, Python.Exceptions.OK
return _idris_error("unreachable due to case in tail position")
# Python.Exceptions.case block in catch at ./Python/Exceptions.idr:130:16
def _idris_Python_46_Exceptions_46_catch_95__95__95__95__95_Python_95__95_Exceptions_95__95_idr_95_130_95_16_95_case(
e0, e1, e2, e3, e4, e5
):
while True:
if e4[0] == 1: # Python.Exceptions.Except
in0, in1 = e4[1:]
return APPLY0(APPLY0(e2, in0), in1)
else: # Python.Exceptions.OK
in2 = e4[1]
return (65740, None, None, in2) # {U_io_pure1}
return _idris_error("unreachable due to case in tail position")
# Python.Prim.case block in next at ./Python/Prim.idr:61:11
def _idris_Python_46_Prim_46_next_95__95__95__95__95_Python_95__95_Prim_95__95_idr_95_61_95_11_95_case(
e0, e1, e2,
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Secondary Storage
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
from marvin.cloudstackAPI import (listImageStores)
from marvin.cloudstackAPI import (updateImageStore)
#Import System modules
import re
import time
_multiprocess_shared_ = True
class TestSecStorageServices(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.apiclient = super(TestSecStorageServices, cls).getClsTestClient().getApiClient()
cls._cleanup = []
return
@classmethod
def tearDownClass(cls):
try:
#Cleanup resources used
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
# Get Zone and pod
self.zones = []
self.pods = []
for zone in self.config.zones:
cmd = listZones.listZonesCmd()
cmd.name = zone.name
z = self.apiclient.listZones(cmd)
if isinstance(z, list) and len(z) > 0:
self.zones.append(z[0].id)
for pod in zone.pods:
podcmd = listPods.listPodsCmd()
podcmd.zoneid = z[0].id
p = self.apiclient.listPods(podcmd)
if isinstance(p, list) and len(p) >0:
self.pods.append(p[0].id)
self.domains = []
dcmd = listDomains.listDomainsCmd()
domains = self.apiclient.listDomains(dcmd)
assert isinstance(domains, list) and len(domains) > 0
for domain in domains:
self.domains.append(domain.id)
return
def tearDown(self):
try:
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_01_sys_vm_start(self):
"""Test system VM start
"""
# 1. verify listHosts has all 'routing' hosts in UP state
# 2. verify listStoragePools shows all primary storage pools
# in UP state
# 3. verify that secondary storage was added successfully
list_hosts_response = list_hosts(
self.apiclient,
type='Routing',
)
self.assertEqual(
isinstance(list_hosts_response, list),
True,
"Check list response returns a valid list"
)
# ListHosts has all 'routing' hosts in UP state
self.assertNotEqual(
len(list_hosts_response),
0,
"Check list host response"
)
for host in list_hosts_response:
self.assertEqual(
host.state,
'Up',
"Check state of routing hosts is Up or not"
)
# ListStoragePools shows all primary storage pools in UP state
list_storage_response = list_storage_pools(
self.apiclient,
)
self.assertEqual(
isinstance(list_storage_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_storage_response),
0,
"Check list storage pools response"
)
for primary_storage in list_hosts_response:
self.assertEqual(
primary_storage.state,
'Up',
"Check state of primary storage pools is Up or not"
)
for _ in range(2):
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
#Verify SSVM response
self.assertNotEqual(
len(list_ssvm_response),
0,
"Check list System VMs response"
)
for ssvm in list_ssvm_response:
if ssvm.state != 'Running':
time.sleep(30)
continue
for ssvm in list_ssvm_response:
self.assertEqual(
ssvm.state,
'Running',
"Check whether state of SSVM is running"
)
return
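    # Hedged helper sketch (not part of the original suite): the retry logic in
    # test_01 above can also be written as a generic wait that polls list_ssvms
    # until every SSVM reports Running or the attempts run out. The attempt
    # count and sleep interval are illustrative assumptions.
    def wait_for_ssvms_running_sketch(self, attempts=5, interval=30):
        for _ in range(attempts):
            ssvms = list_ssvms(
                self.apiclient,
                systemvmtype='secondarystoragevm',
            )
            if isinstance(ssvms, list) and all(vm.state == 'Running' for vm in ssvms):
                return True
            time.sleep(interval)
        return False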
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_02_sys_template_ready(self):
"""Test system templates are ready
"""
# Validate the following
# If SSVM is in UP state and running
# 1. wait for listTemplates to show all builtin templates downloaded and
# in Ready state
hypervisors = {}
for zone in self.config.zones:
for pod in zone.pods:
for cluster in pod.clusters:
hypervisors[cluster.hypervisor] = "self"
for zid in self.zones:
for k, v in list(hypervisors.items()):
self.debug("Checking BUILTIN templates in zone: %s" %zid)
list_template_response = list_templates(
self.apiclient,
hypervisor=k,
zoneid=zid,
templatefilter=v,
listall=True,
account='system'
)
self.assertEqual(validateList(list_template_response)[0], PASS,\
"templates list validation failed")
# Ensure all BUILTIN templates are downloaded
templateid = None
for template in list_template_response:
if template.templatetype == "BUILTIN":
templateid = template.id
template_response = list_templates(
self.apiclient,
id=templateid,
zoneid=zid,
templatefilter=v,
listall=True,
account='system'
)
if isinstance(template_response, list):
template = template_response[0]
else:
raise Exception("ListTemplate API returned invalid list")
if template.status == 'Download Complete':
self.debug("Template %s is ready in zone %s"%(template.templatetype, zid))
elif 'Downloaded' not in template.status.split():
self.debug("templates status is %s"%template.status)
self.assertEqual(
template.isready,
True,
"Builtin template is not ready %s in zone %s"%(template.status, zid)
)
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_03_check_read_only_flag(self):
"""Test the secondary storage read-only flag
"""
# Validate the following
# It is possible to enable/disable the read-only flag on a secondary storage and filter by it
# 1. Make the first secondary storage as read-only and verify its state has been changed
# 2. Search for the read-only storages and make sure ours is in the list
# 3. Make it again read/write and verify it has been set properly
first_storage = self.list_secondary_storages(self.apiclient)[0]
first_storage_id = first_storage['id']
# Step 1
self.update_secondary_storage(self.apiclient, first_storage_id, True)
updated_storage = self.list_secondary_storages(self.apiclient, first_storage_id)[0]
self.assertEqual(
updated_storage['readonly'],
True,
"Check if the secondary storage status has been set to read-only"
)
# Step 2
readonly_storages = self.list_secondary_storages(self.apiclient, readonly=True)
self.assertEqual(
isinstance(readonly_storages, list),
True,
"Check list response returns a valid list"
)
result = any(d['id'] == first_storage_id for d in readonly_storages)
self.assertEqual(
result,
True,
"Check if we are able to list storages by their read-only status"
)
# Step 3
self.update_secondary_storage(self.apiclient, first_storage_id, False)
updated_storage = self.list_secondary_storages(self.apiclient, first_storage_id)[0]
self.assertEqual(
updated_storage['readonly'],
False,
"Check if the secondary storage status has been set back to read-write"
)
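    # Hedged sketch (not part of the original suite): the list/update helpers
    # used above presumably wrap the listImageStores/updateImageStore commands
    # imported at the top of this module. A direct call might look like the
    # following; the cmd attribute names follow the usual Marvin conventions
    # and are assumptions here, not verified against this file.
    def _update_image_store_readonly_sketch(self, store_id, readonly):
        cmd = updateImageStore.updateImageStoreCmd()
        cmd.id = store_id
        cmd.readonly = readonly
        return self.apiclient.updateImageStore(cmd)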
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_04_migrate_to_read_only_storage(self):
"""Test migrations to a read-only secondary storage
"""
# Validate the following
# It is not possible to migrate a storage to a read-only one
# NOTE: This test requires more than one secondary storage in the system
# 1. Make the first storage read-only
# 2. Try complete migration from the second to the first storage - it should fail
# 3. Try balanced migration from the second to the first storage - it should fail
# 4. Make the first storage read-write again
storages = self.list_secondary_storages(self.apiclient)
if (len(storages)) < 2:
self.skipTest(
"This test requires more than one secondary storage")
first_storage = self.list_secondary_storages(self.apiclient)[0]
first_storage_id = first_storage['id']
second_storage = self.list_secondary_storages(self.apiclient)[1]
second_storage_id = second_storage['id']
# Set the first storage to read-only
self.update_secondary_storage(self.apiclient, first_storage_id, True)
# Try complete migration from second to the first storage
success = False
try:
self.migrate_secondary_storage(self.apiclient, second_storage_id, first_storage_id, "complete")
except Exception as ex:
if re.search("No destination valid store\(s\) available to migrate.", str(ex)):
success = True
else:
self.debug("Secondary storage complete migration to a read-only one\
did not fail appropriately. Error was actually : " + str(ex));
self.assertEqual(success, True, "Check if a complete migration to a read-only storage one fails appropriately")
# Try balanced migration from second to the first storage
success = False
try:
self.migrate_secondary_storage(self.apiclient, second_storage_id, first_storage_id, "balance")
except Exception as ex:
if re.search("No destination valid store\(s\) available to migrate.", str(ex)):
success = True
else:
self.debug("Secondary storage balanced migration to a read-only one\
did not fail appropriately. Error was actually : " + str(ex))
self.assertEqual(success, True, "Check if a balanced migration to a read-only storage one fails appropriately")
# Set the first storage back to read-write
self.update_secondary_storage(self.apiclient, first_storage_id, False)
@attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false")
def test_05_migrate_to_less_free_space(self):
"""Test migrations when the destination storage has less space
"""
# Validate the following
# Migration to a secondary storage with less space should be refused
# NOTE: This test requires more than one secondary storage in the system
# 1. Try complete migration from a storage with more (or equal) free space - migration should be refused
storages = self.list_secondary_storages(self.apiclient)
if (len(storages)) < 2:
self.skipTest(
"This test requires more than one secondary storage")
first_storage = self.list_secondary_storages(self.apiclient)[0]
first_storage_disksizeused = first_storage['disksizeused']
first_storage_disksizetotal = first_storage['disksizetotal']
second_storage = self.list_secondary_storages(self.apiclient)[1]
second_storage_disksizeused = second_storage['disksizeused']
second_storage_disksizetotal = second_storage['disksizetotal']
first_storage_freespace = first_storage_disksizetotal - first_storage_disksizeused
second_storage_freespace = second_storage_disksizetotal - second_storage_disksizeused
if first_storage_freespace == second_storage_freespace:
self.skipTest(
"This test requires two secondary storages with different free space")
# Setting the storage
from django.apps import apps
from django.contrib.auth.management import create_permissions
from project import settings
# def post_migrate_create_organization(sender, **kwargs):
# for app_config in apps.get_app_configs():
# create_permissions(app_config, apps=apps, verbosity=0)
#
# Organization = sender.get_model("Organization")
# org, created = Organization.objects.get_or_create(
# name="MIT", url="https://lookit.mit.edu"
# )
def post_migrate_create_social_app(sender, **kwargs):
Site = apps.get_model("sites.Site")
SocialApp = apps.get_model("socialaccount.SocialApp")
site = Site.objects.first()
site.domain = settings.SITE_DOMAIN
site.name = settings.SITE_NAME
site.save()
if not SocialApp.objects.exists():
app = SocialApp.objects.create(
key="",
name="OSF",
provider="osf",
# Defaults are valid for staging
client_id=settings.OSF_OAUTH_CLIENT_ID,
secret=settings.OSF_OAUTH_SECRET,
)
app.sites.clear()
app.sites.add(site)
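# Hedged wiring sketch (not part of the original module): handlers like the two
# in this file are normally connected to Django's post_migrate signal from an
# AppConfig.ready(). The function below shows that wiring; which AppConfig owns
# these handlers is an assumption, so it is passed in explicitly by the caller.
def connect_post_migrate_handlers_sketch(app_config):
    from django.db.models.signals import post_migrate
    post_migrate.connect(post_migrate_create_social_app, sender=app_config)
    post_migrate.connect(post_migrate_create_flatpages, sender=app_config)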
def post_migrate_create_flatpages(sender, **kwargs):
Site = apps.get_model("sites.Site")
FlatPage = apps.get_model("flatpages.FlatPage")
flatpages = [
dict(
url="/",
title="Home",
content=f"""
<div class="main">
<div class="home-jumbotron">
<div class="content">
<h1>Lookit<br>
<small>the online child lab</small></h1>
<p>A project of the MIT Early Childhood Cognition Lab</p><a class="btn btn-primary btn-lg ember-view" href="/studies" id="ember821">Participate in a Study</a>
</div>
</div>
<div class="information-row lookit-row">
<div class="container">
<div class="row">
<div class="col-md-4">
<div class="home-content-icon">
<i class="fa fa-flask"></i>
</div>
<h3 class="text-center">Bringing science home</h3>
<p>Here at MIT's Early Childhood Cognition Lab, we're trying a new approach in developmental psychology: bringing the experiments to you.</p>
</div>
<div class="col-md-4">
<div class="home-content-icon">
<i class="fa fa-cogs"></i>
</div>
<h3 class="text-center">Help us understand how your child thinks</h3>
<p>Our online studies are quick and fun, and let you as a parent contribute to our collective understanding of the fascinating phenomenon of children's learning. In some experiments you'll step into the role of a researcher, asking your child questions or controlling the experiment based on what he or she does.</p>
</div>
<div class="col-md-4">
<div class="home-content-icon">
<i class="fa fa-coffee"></i>
</div>
<h3 class="text-center">Participate whenever and wherever</h3>
<p>Log in or create an account at the top right to get started! You can participate in studies from home by doing an online activity with your child that is videotaped via your webcam.</p>
</div>
</div>
</div>
</div>
<div class="news-row lookit-row">
<div class="container">
<div class="row">
<h3>News</h3>
<div class="col-xs-12">
<div class="row">
<div class="col-md-2 col-md-offset-1">
March 30, 2017
</div>
<div class="col-md-7">
Our two papers describing online replications of classic developmental studies on a prototype of the Lookit system are now available in the <a href="http://www.mitpressjournals.org/doi/abs/10.1162/OPMI_a_00002#.WN2QeY61vtc">first issue of Open Mind</a>, a new open-access journal from MIT Press! Thank you so much to all of our early participants who made this work possible.
</div>
</div>
<div class="row">
<div class="col-md-2 col-md-offset-1">
September 16, 2016
</div>
<div class="col-md-7">
We're back up and running! If you had an account on the old site, you should have received an email letting you know how to access your new account. We're getting started by piloting a study about babies' intuitive understanding of physics!
</div>
</div>
<div class="row">
<div class="col-md-2 col-md-offset-1">
August 4, 2016
</div>
<div class="col-md-7">
Lookit is taking a break while our partners at the Center for Open Science work on re-engineering the site so it's easier for both parents and researchers to use. We're looking forward to re-opening the login system and starting up some new studies early this fall! Please contact <EMAIL> with any questions.
</div>
</div>
<div class="row">
<div class="col-md-2 col-md-offset-1">
October 1, 2015
</div>
<div class="col-md-7">
We've finished collecting data for replications of three classic studies, looking at infants' and children's understanding of probability, language, and reliability. The results will be featured here as soon as they're published!
</div>
</div>
<div class="row">
<div class="col-md-2 col-md-offset-1">
June 30, 2014
</div>
<div class="col-md-7">
An MIT News press release discusses Lookit <a href="https://newsoffice.mit.edu/2014/mit-launches-online-lab-early-childhood-learning-lookit">here</a>. The project was also featured in <a href="http://www.bostonmagazine.com/health/blog/2014/06/19/new-mit-lab/">Boston Magazine</a> and on the <a href="https://www.sciencenews.org/blog/growth-curve/your-baby-can-watch-movies-science">Science News blog</a>. Stay up-to-date and connect with other science-minded parents through our <a href="https://www.facebook.com/lookit.mit.edu">Facebook page</a>!
</div>
</div>
<div class="row">
<div class="col-md-2 col-md-offset-1">
February 5, 2014
</div>
<div class="col-md-7">
Beta testing of Lookit within the MIT community begins! Many thanks to our first volunteers.
</div>
</div>
</div>
</div>
</div>
</div>
<footer>
<div class="footer-row lookit-row">
<div class="container">
<div class="row">
<div class="col-md-1"><img src="{settings.STATIC_URL}images/nsf.gif"></div>
<div class="col-md-11">
This material is based upon work supported by the National Science Foundation (NSF) under Grant No. 1429216; the Center for Brains, Minds and Machines (CBMM), funded by NSF STC award CCF-1231216, and by an NSF Graduate Research Fellowship under Grant No. 1122374. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the National Science Foundation.
</div>
</div>
</div>
</div>
</footer>
</div>
""",
),
dict(
url="/faq/",
title="FAQ",
content=f"""
<div class="main">
<div class="lookit-row lookit-page-title">
<div class="container">
<h2>Frequently Asked Questions</h2>
</div>
</div>
<div class="lookit-row faq-row">
<div class="container">
<h3>Participation</h3>
<div class="panel-group" id="accordion" role="tablist">
<div class="panel panel-default">
<div class="panel-heading" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse1">What is a "study" about cognitive development?</a></h4>
</div>
<div id="collapse1" class="panel-collapse collapse" >
<div class="panel-body">
<div>
<p>Cognitive development is the science of what kids understand and how they learn. Researchers in cognitive development are interested in questions like...</p>
<ul>
<li>what knowledge and abilities infants are born with, and what they have to learn from experience</li>
<li>how abilities like mathematical reasoning are organized and how they develop over time</li>
<li>what strategies children use to learn from the wide variety of data they observe</li>
</ul>
<p>A study is meant to answer a very specific question about how children learn or what they know: for instance, "Do three-month-olds recognize their parents' faces?"</p>
</div>
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse2">How can we participate online?</a></h4>
</div>
<div id="collapse2" class="panel-collapse collapse">
<div class="panel-body">
<div>
<p>If you have any children between 3 months and 7 years old and would like to participate, create an account and take a look at what we have available for your child's age range. You'll need a working webcam to participate.</p>
<p>When you select a study, you'll be asked to read a consent form and record yourself stating that you and your child agree to participate. Then we'll guide you through what will happen during the study. Depending on your child's age, your child may answer questions directly or we may be looking for indirect signs of what she thinks is going on--like how long she looks at a surprising outcome.</p>
<p>Some portions of the study will be automatically recorded using your webcam and sent securely to our MIT lab. Trained researchers will watch the video and record your child's responses--for instance, which way he pointed, or how long she looked at each image. We'll put these together with responses from lots of other children to learn more about how kids think!</p>
</div>
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading collapsed" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse3">How do we provide consent to participate?</a></h4>
</div>
<div id="collapse3" class="panel-collapse collapse">
<div class="panel-body">
<div>
<p>Rather than having the parent or legal guardian sign a form, we ask that you read aloud (or sign in ASL) a statement of consent which is recorded using your webcam and sent back to our lab. This statement holds the same weight as a signed form, but should be less hassle for you. It also lets us verify that you understand written English and that you understand you're being videotaped.</p>
<p>If we receive a consent form that does NOT clearly demonstrate informed consent--for instance, we see a parent and child but the parent does not read the statement--any other video collected during that session will be deleted without viewing.</p>
<div class="row">
<div class="col-sm-10 col-sm-offset-1 col-md-8 col-md-offset-2 col-lg-6 col-lg-offset-3">
<video controls="true" src="{settings.STATIC_URL}videos/consent.mp4"></video>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="panel panel-default">
<div class="panel-heading" role="tab">
<h4 class="panel-title"><a data-toggle="collapse" data-parent="#accordion" href="#collapse4">How is our information kept confidential?</a></h4>
</div>
<div id="collapse4" class="panel-collapse collapse">
<div class="panel-body">
<div>
<p>We do not publish or use identifying information about individual children or families. We never publish children's names or birthdates (birthdates are used only to figure out how old children are at the time of the study). Your video is transmitted over a secure https connection to our lab and kept on a password-protected server. See 'Who will see our video?'</p>
</div>
</div>
</div>
</div>
<div class="panel panel-default">
<div | |
= getResultPercentage(testSetData)
resultSet.append(["Condition (Multi Agent Attributes)", testSetPercentage, copy.deepcopy(testSetData)])
#Creating source metamemes via the script facade
testSetData = testSourceCreateMeme('SourceCreateMeme.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Creation", testSetPercentage, copy.deepcopy(testSetData)])
#Set a source meme property via the script facade
testSetData = testSourceProperty('SourceProperty.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Property Set", testSetPercentage, copy.deepcopy(testSetData)])
#Delete a source meme property via the script facade
testSetData = testSourcePropertyRemove('SourceProperty.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Meme Property Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Add a member meme via the script facade
testSetData = testSourceMember('SourceMember.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Member Meme Add", testSetPercentage, copy.deepcopy(testSetData)])
#Remove a member meme via the script facade
testSetData = testSourceMemberRemove('SourceMember.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Member Meme Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Add an enhancement via the script facade
testSetData = testSourceEnhancement('SourceEnhancement.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Enhancement Add", testSetPercentage, copy.deepcopy(testSetData)])
#Remove an enhancement via the script facade
testSetData = testSourceEnhancementRemove('SourceEnhancement.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Enhancement Remove", testSetPercentage, copy.deepcopy(testSetData)])
#Set the singleton flag via the script facade
testSetData = testSourceSingletonSet('SourceCreateMeme.atest')
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Editor Singleton Setting", testSetPercentage, copy.deepcopy(testSetData)])
#Create a Generic entity and check to see that it's meme is Graphyne.Generic
testSetData = testGeneric()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Generic Entity", testSetPercentage, copy.deepcopy(testSetData)])
#Test Entity Deletion
testSetData = testDeleteEntity()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Entity Deletion", testSetPercentage, copy.deepcopy(testSetData)])
#Atomic and subatomic links
testSetData = testSubatomicLinks()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Subatomic Links", testSetPercentage, copy.deepcopy(testSetData)])
#getting the cluster member list
testSetData = testGetClusterMembers()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Cluster Member List", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testGetHasCounterpartsByType()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Has Counterparts by Type", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testGetEntityMetaMemeType()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API method testGetEntityMetaMemeType", testSetPercentage, copy.deepcopy(testSetData)])
testSetData = testInstallExecutor()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API method testInstallExecutor", testSetPercentage, copy.deepcopy(testSetData)])
#getting the cluster dictionary
testSetData = testGetCluster()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Cluster", testSetPercentage, copy.deepcopy(testSetData)])
#testRevertEntity
testSetData = testRevertEntity()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["API Method revertEntity", testSetPercentage, copy.deepcopy(testSetData)])
#testPropertyChangeEvent
testSetData = testPropertyChangeEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Property Change Event", testSetPercentage, copy.deepcopy(testSetData)])
#testLinkEvent
testSetData = testLinkEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Link Event", testSetPercentage, copy.deepcopy(testSetData)])
#testBrokenEvents
testSetData = testBrokenEvents()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Broken Event", testSetPercentage, copy.deepcopy(testSetData)])
#testLinkEvent
testSetData = testInitializeEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Initialize Event", testSetPercentage, copy.deepcopy(testSetData)])
#testBrokenEvents
testSetData = testRemoveEvent()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Remove Event", testSetPercentage, copy.deepcopy(testSetData)])
#testAtomicSubatomic
testSetData = testAtomicSubatomic()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Atomic and Subatomic", testSetPercentage, copy.deepcopy(testSetData)])
#testGetTraverseReport
testSetData = testGetTraverseReport()
testSetPercentage = getResultPercentage(testSetData)
resultSet.append(["Traverse Report", testSetPercentage, copy.deepcopy(testSetData)])
#endTime = time.time()
#validationTime = endTime - startTime
#publishResults(resultSet, validationTime, css)
return resultSet
#Graph.logQ.put( [logType , logLevel.DEBUG , method , "exiting"])
def smokeTestSet(persistence, lLevel, css, profileName, persistenceArg = None, persistenceType = None, resetDatabase = False, createTestDatabase = False, scaleFactor = 0):
'''
    repoLocations = a list of all of the filesystem locations that compose the repository.
    useDefaultSchema = If True, then load the 'default schema' of Graphyne.
persistenceType = The type of database used by the persistence engine. This is used to determine which flavor of SQL syntax to use.
Enumeration of Possible values:
            Default to None, which is no persistence
            "sqlite" - Sqlite3
            "mssql" - Microsoft SQL Server
            "hana" - SAP Hana
persistenceArg = the Module/class supplied to host the entityRepository and LinkRepository. If default, then use the Graphyne.DatabaseDrivers.NonPersistent module.
Enumeration of possible values:
None - May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"none" - no persistence. May only be used in conjunction with "sqlite" as persistenceType and will throw an InconsistentPersistenceArchitecture otherwise
"memory" - Use SQLite in in-memory mode (connection = ":memory:")
"<valid filename with .sqlite as extension>" - Use SQLite, with that file as the database
"<filename with .sqlite as extension, but no file>" - Use SQLite and create that file to use as the DB file
"<anything else>" - Presume that it is a pyodbc connection string and throw a InconsistentPersistenceArchitecture exception if the dbtype is "sqlite".
createTestDatabase = a flag for creating regression test data. This flag is only to be used for regression testing the graph and even then, only if the test
database does not already exist.
scaleFactor = Scale factor (S). Given N non-singleton memes, N*S "ballast" entities will be created in the DB before starting the test suite. This allows us
to use larger datasets to test scalability (at least with regards to entity repository size)
        *If persistenceType is None (no persistence), then this is ignored and won't throw any InconsistentPersistenceArchitecture exceptions.
'''
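    # A minimal usage sketch (hypothetical, not part of this test suite); the driver module
    # name below is a placeholder for whichever Graphyne.DatabaseDrivers module is in use:
    #
    #     from graphyne.DatabaseDrivers import DriverModule as driver   # hypothetical name
    #     report = smokeTestSet(driver, Graph.logLevel.WARNING, css="", profileName="sqlite-memory",
    #                           persistenceArg="memory", persistenceType="sqlite")
    #     print(report["profileName"], report["entityCount"], report["validationTime"])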
global testImplicit
print(("\nStarting Graphyne Smoke Test: %s") %(persistence.__name__))
print(("...%s: Engine Start") %(persistence.__name__))
#Only test implicit memes in the case that we are using persistence
if persistenceType is None:
testImplicit = False
#Don't validate the repo when we are performance testing
if scaleFactor < 1:
validateOnLoad = True
else:
validateOnLoad = False
time.sleep(10.0)
installFilePath = os.path.dirname(__file__)
testRepo = os.path.join(installFilePath, "Config", "Test", "TestRepository")
#mainAngRepo = os.path.join(os.environ['ANGELA_HOME'], "RMLRepository")
try:
Graph.startLogger(lLevel)
Graph.startDB([testRepo], persistenceType, persistenceArg, True, resetDatabase, True, validateOnLoad)
except Exception as e:
print(("Graph not started. Traceback = %s" %e))
raise e
print(("...Engine Started: %s") %persistence.__name__)
time.sleep(30.0)
print(("...%s: Engine Started") %(persistence.__name__))
#If scaleFactor > 0, then we are also testing performance
if (scaleFactor > 0):
print("Performance Test: ...Creating Content")
for unusedj in range(1, scaleFactor):
for moduleID in Graph.templateRepository.modules.keys():
if moduleID != "BrokenExamples":
                    #The module BrokenExamples contains memes that are deliberately malformed. Don't bother with these.
module = Graph.templateRepository.modules[moduleID]
for listing in module:
template = Graph.templateRepository.resolveTemplateAbsolutely(listing[1])
if template.className == "Meme":
if template.isSingleton != True:
try:
unusedEntityID = Graph.api.createEntityFromMeme(template.path.fullTemplatePath)
except Exception as e:
pass
print("Performance Test: Finished Creating Content")
# /Scale Factor'
entityCount = Graph.countEntities()
startTime = time.time()
try:
resultSet = runTests(css)
except Exception as e:
print(("test run problem. Traceback = %s" %e))
raise e
endTime = time.time()
validationTime = endTime - startTime
testReport = {"resultSet" : resultSet, "validationTime" : validationTime, "persistence" : persistence.__name__, "profileName" : profileName, "entityCount" : entityCount}
#publishResults(resultSet, validationTime, css)
print(("...%s: Test run finished. Waiting 30 seconds for log thread to catch up before starting shutdown") %(persistence.__name__))
time.sleep(30.0)
print(("...%s: Engine Stop (%s)") %(persistence.__name__, profileName))
Graph.stopLogger()
print(("...%s: Engine Stopped (%s)") %(persistence.__name__, profileName))
return testReport
if __name__ == "__main__":
print("\nStarting Graphyne Smoke Test")
parser = argparse.ArgumentParser(description="Graphyne Smoke Test")
parser.add_argument("-l", "--logl", type=str, help="|String| Graphyne's log level during the validation run. \n Options are (in increasing order of verbosity) 'warning', 'info' and 'debug'. \n Default is 'warning'")
    parser.add_argument("-r", "--resetdb", type=str, help="|String| Reset the existing persistence DB. This defaults to true and is only ever relevant when Graphyne is using relational database persistence.")
parser.add_argument("-d", "--dbtype", type=str, help="|String| The database type to be used. If --dbtype is a relational database, it will also determine which flavor of SQL syntax to use.\n Possible options are 'none', 'sqlite', 'mssql' and 'hana'. \n Default is 'none'")
    parser.add_argument("-c", "--dbtcon", type=str, help="|String| The database connection string (if a relational DB) or filename (if SQLite).\n 'none' - no persistence. This is the default value\n 'memory' - Use SQLite in in-memory mode (connection = ':memory:'). No-persistence defaults to memory if SQLite is used\n '<valid filename>' - Use SQLite, with that file as the database\n <filename with .sqlite as extension, but no file> - Use SQLite and create that file to use as the DB file\n <anything else> - Presume that it is a pyodbc connection string")
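    # Example invocations (hypothetical file name Smoketest.py; flags as defined above):
    #   python Smoketest.py                                  -> no persistence, log level 'warning'
    #   python Smoketest.py -l debug -d sqlite -c memory     -> in-memory SQLite persistence
    #   python Smoketest.py -d sqlite -c test.sqlite -r true -> file-backed SQLite, reset first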
args = parser.parse_args()
lLevel = Graph.logLevel.WARNING
if args.logl:
if args.logl == "info":
lLevel = Graph.logLevel.INFO
print("\n -- log level = 'info'")
elif args.logl == "debug":
lLevel = Graph.logLevel.DEBUG
print("\n -- log level = 'debug'")
elif args.logl == "warning":
pass
else:
            print("Invalid log level %s! Permitted values of --logl are 'warning', 'info' and 'debug'!" %args.logl)
sys.exit()
persistenceType = None
if args.dbtype:
if (args.dbtype is None) or (args.dbtype == 'none'):
pass
elif (args.dbtype == 'sqlite') or (args.dbtype == 'mssql') or (args.dbtype == 'hana'):
persistenceType = args.dbtype
print("\n -- using | |
102, 195, 251, 219, 219, 126, 60],
9718: [60, 102, 195, 223, 219, 219, 126, 60],
9719: [60, 126, 219, 219, 223, 195, 102, 60],
9720: [255, 198, 204, 216, 240, 224, 192, 128],
9721: [255, 99, 51, 27, 15, 7, 3, 1],
9722: [1, 3, 7, 15, 27, 51, 99, 255],
9723: [0, 124, 108, 108, 108, 124, 0, 0],
9724: [0, 124, 124, 124, 124, 124, 0, 0],
9725: [0, 0, 60, 36, 36, 60, 0, 0],
9726: [0, 0, 60, 60, 60, 60, 0, 0],
9727: [128, 192, 224, 240, 216, 204, 198, 255],
9765: [124, 198, 108, 56, 254, 56, 56, 0],
9768: [24, 60, 24, 126, 24, 24, 24, 0],
9775: [0, 126, 129, 177, 243, 255, 126, 0],
9776: [0, 0, 254, 0, 254, 0, 254, 0],
9777: [0, 0, 198, 0, 254, 0, 254, 0],
9778: [0, 0, 254, 0, 198, 0, 254, 0],
9779: [0, 0, 198, 0, 198, 0, 254, 0],
9780: [0, 0, 254, 0, 254, 0, 198, 0],
9781: [0, 0, 198, 0, 254, 0, 198, 0],
9782: [0, 0, 254, 0, 198, 0, 198, 0],
9783: [0, 0, 198, 0, 198, 0, 198, 0],
9785: [126, 129, 165, 129, 153, 189, 129, 126],
9786: [126, 129, 165, 129, 189, 153, 129, 126],
9787: [126, 255, 219, 255, 195, 231, 255, 126],
9824: [16, 56, 124, 254, 254, 56, 124, 0],
9825: [108, 254, 198, 198, 108, 56, 16, 0],
9826: [16, 56, 108, 198, 108, 56, 16, 0],
9827: [16, 56, 84, 254, 84, 16, 56, 0],
9828: [16, 56, 108, 198, 238, 56, 124, 0],
9829: [108, 254, 254, 254, 124, 56, 16, 0],
9830: [16, 56, 124, 254, 124, 56, 16, 0],
9831: [56, 56, 198, 198, 238, 40, 124, 0],
9833: [12, 12, 12, 12, 12, 60, 124, 56],
9834: [24, 28, 30, 27, 24, 120, 248, 112],
9835: [127, 99, 99, 99, 99, 103, 230, 192],
9836: [127, 99, 127, 99, 99, 103, 230, 192],
9866: [0, 0, 0, 0, 0, 0, 254, 0],
9867: [0, 0, 0, 0, 0, 0, 198, 0],
9868: [0, 0, 0, 0, 254, 0, 254, 0],
9869: [0, 0, 0, 0, 198, 0, 254, 0],
9870: [0, 0, 0, 0, 254, 0, 198, 0],
9871: [0, 0, 0, 0, 198, 0, 198, 0],
9898: [0, 0, 60, 66, 66, 66, 60, 0],
9899: [0, 0, 60, 126, 126, 126, 60, 0],
9900: [0, 56, 124, 108, 124, 56, 0, 0],
9992: [48, 56, 156, 255, 255, 156, 56, 48],
10036: [146, 84, 56, 254, 56, 84, 146, 0],
10240: [0, 0, 0, 0, 0, 0, 0, 0],
10241: [240, 240, 0, 0, 0, 0, 0, 0],
10242: [0, 0, 240, 240, 0, 0, 0, 0],
10243: [240, 240, 240, 240, 0, 0, 0, 0],
10244: [0, 0, 0, 0, 240, 240, 0, 0],
10245: [240, 240, 0, 0, 240, 240, 0, 0],
10246: [0, 0, 240, 240, 240, 240, 0, 0],
10247: [240, 240, 240, 240, 240, 240, 0, 0],
10248: [15, 15, 0, 0, 0, 0, 0, 0],
10249: [255, 255, 0, 0, 0, 0, 0, 0],
10250: [15, 15, 240, 240, 0, 0, 0, 0],
10251: [255, 255, 240, 240, 0, 0, 0, 0],
10252: [15, 15, 0, 0, 240, 240, 0, 0],
10253: [255, 255, 0, 0, 240, 240, 0, 0],
10254: [15, 15, 240, 240, 240, 240, 0, 0],
10255: [255, 255, 240, 240, 240, 240, 0, 0],
10256: [0, 0, 15, 15, 0, 0, 0, 0],
10257: [240, 240, 15, 15, 0, 0, 0, 0],
10258: [0, 0, 255, 255, 0, 0, 0, 0],
10259: [240, 240, 255, 255, 0, 0, 0, 0],
10260: [0, 0, 15, 15, 240, 240, 0, 0],
10261: [240, 240, 15, 15, 240, 240, 0, 0],
10262: [0, 0, 255, 255, 240, 240, 0, 0],
10263: [240, 240, 255, 255, 240, 240, 0, 0],
10264: [15, 15, 15, 15, 0, 0, 0, 0],
10265: [255, 255, 15, 15, 0, 0, 0, 0],
10266: [15, 15, 255, 255, 0, 0, 0, 0],
10267: [255, 255, 255, 255, 0, 0, 0, 0],
10268: [15, 15, 15, 15, 240, 240, 0, 0],
10269: [255, 255, 15, 15, 240, 240, 0, 0],
10270: [15, 15, 255, 255, 240, 240, 0, 0],
10271: [255, 255, 255, 255, 240, 240, 0, 0],
10272: [0, 0, 0, 0, 15, 15, 0, 0],
10273: [240, 240, 0, 0, 15, 15, 0, 0],
10274: [0, 0, 240, 240, 15, 15, 0, 0],
10275: [240, 240, 240, 240, 15, 15, 0, 0],
10276: [0, 0, 0, 0, 255, 255, 0, 0],
10277: [240, 240, 0, 0, 255, 255, 0, 0],
10278: [0, 0, 240, 240, 255, 255, 0, 0],
10279: [240, 240, 240, 240, 255, 255, 0, 0],
10280: [15, 15, 0, 0, 15, 15, 0, 0],
10281: [255, 255, 0, 0, 15, 15, 0, 0],
10282: [15, 15, 240, 240, 15, 15, 0, 0],
10283: [255, 255, 240, 240, 15, 15, 0, 0],
10284: [15, 15, 0, 0, 255, 255, 0, 0],
10285: [255, 255, 0, 0, 255, 255, 0, 0],
10286: [15, 15, 240, 240, 255, 255, 0, 0],
10287: [255, 255, 240, 240, 255, 255, 0, 0],
10288: [0, 0, 15, 15, 15, 15, 0, 0],
10289: [240, 240, 15, 15, 15, 15, 0, 0],
10290: [0, 0, 255, 255, 15, 15, 0, 0],
10291: [240, 240, 255, 255, 15, 15, 0, 0],
10292: [0, 0, 15, 15, 255, 255, 0, 0],
10293: [240, 240, 15, 15, 255, 255, 0, 0],
10294: [0, 0, 255, 255, 255, 255, 0, 0],
10295: [240, 240, 255, 255, 255, 255, 0, 0],
10296: [15, 15, 15, 15, 15, 15, 0, 0],
10297: [255, 255, 15, 15, 15, 15, 0, 0],
10298: [15, 15, 255, 255, 15, 15, 0, 0],
10299: [255, 255, 255, 255, 15, 15, 0, 0],
10300: [15, 15, 15, 15, 255, 255, 0, 0],
10301: [255, 255, 15, 15, 255, 255, 0, 0],
10302: [15, 15, 255, 255, 255, 255, 0, 0],
10303: [255, 255, 255, 255, 255, 255, 0, 0],
10304: [0, 0, 0, 0, 0, 0, 240, 240],
10305: [240, 240, 0, 0, 0, 0, 240, 240],
10306: [0, 0, 240, 240, 0, 0, 240, 240],
10307: [240, 240, 240, 240, 0, 0, 240, 240],
10308: [0, 0, 0, 0, 240, 240, 240, 240],
10309: [240, 240, 0, 0, 240, 240, 240, 240],
10310: [0, 0, 240, 240, 240, 240, 240, 240],
10311: [240, 240, 240, 240, 240, 240, 240, 240],
10312: [15, 15, 0, 0, 0, 0, 240, 240],
10313: [255, 255, 0, 0, 0, 0, 240, 240],
10314: [15, 15, 240, 240, 0, 0, 240, 240],
10315: [255, 255, 240, 240, 0, 0, 240, 240],
10316: [15, 15, 0, 0, 240, 240, 240, 240],
10317: [255, 255, 0, 0, 240, 240, 240, 240],
10318: [15, 15, 240, 240, 240, 240, 240, 240],
10319: [255, 255, 240, 240, 240, 240, 240, 240],
10320: [0, 0, 15, 15, 0, 0, 240, 240],
10321: [240, 240, 15, 15, 0, 0, 240, 240],
10322: [0, 0, 255, 255, 0, 0, 240, 240],
10323: [240, 240, 255, 255, 0, 0, 240, 240],
10324: [0, 0, 15, 15, 240, 240, 240, 240],
10325: [240, 240, 15, 15, 240, 240, 240, 240],
10326: [0, 0, 255, 255, 240, 240, 240, 240],
10327: [240, 240, 255, 255, 240, 240, 240, 240],
10328: [15, 15, 15, 15, 0, 0, 240, 240],
10329: [255, 255, 15, 15, 0, 0, 240, 240],
10330: [15, 15, 255, 255, 0, 0, 240, 240],
10331: [255, 255, 255, 255, 0, 0, 240, 240],
10332: [15, 15, 15, 15, 240, 240, 240, 240],
10333: [255, 255, 15, 15, 240, 240, 240, 240],
10334: [15, 15, 255, | |
import copy
import datetime
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Sized, Callable
from googleapiwrapper.gmail_api import ThreadQueryResults
from pythoncommons.file_utils import FileUtils
from pythoncommons.html_utils import HtmlGenerator
from pythoncommons.result_printer import (
TabulateTableFormat,
GenericTableWithHeader,
ResultPrinter,
DEFAULT_TABLE_FORMATS,
TableRenderingConfig,
)
from pythoncommons.string_utils import StringUtils, auto_str
from yarndevtools.commands.unittestresultaggregator.common import (
get_key_by_testcase_filter,
OperationMode,
SummaryMode,
TestCaseFilter,
FailedTestCase,
FailedTestCaseAggregated,
BuildComparisonResult,
)
from yarndevtools.constants import (
REPORT_FILE_DETAILED_HTML,
REPORT_FILE_DETAILED_TXT,
REPORT_FILE_SHORT_HTML,
REPORT_FILE_SHORT_TXT,
)
LOG = logging.getLogger(__name__)
class TableOutputFormat(Enum):
REGULAR = "regular"
HTML = "html"
REGULAR_WITH_COLORS = "regular_colorized"
class TableDataType(Enum):
MATCHED_LINES = "matched lines per thread"
MATCHED_LINES_AGGREGATED = "matched lines aggregated"
MAIL_SUBJECTS = "found mail subjects"
UNIQUE_MAIL_SUBJECTS = "found unique mail subjects"
LATEST_FAILURES = "latest failures"
TESTCASES_TO_JIRAS = "testcases to jiras"
UNKNOWN_FAILURES = "unknown failures"
REOCCURRED_FAILURES = "reoccurred failures"
BUILD_COMPARISON = "build comparison"
def __init__(self, key, header_value=None):
self.key = key
if not header_value:
header_value = key.upper()
self.header = header_value
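    # For example, TableDataType.MATCHED_LINES.key == "matched lines per thread" and, since no
    # explicit header value is given, TableDataType.MATCHED_LINES.header == "MATCHED LINES PER THREAD".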
@dataclass
class OutputFormatRules:
truncate_length: bool
abbrev_tc_package: str or None
truncate_subject_with: str or None
# TODO Get rid of this later?
@auto_str
class UnitTestResultAggregatorTableRenderingConfig(TableRenderingConfig):
def __init__(
self,
data_type: TableDataType = None,
testcase_filters: List[TestCaseFilter] or None = None,
header: List[str] = None,
table_types: List[TableOutputFormat] = None,
out_fmt: OutputFormatRules or None = None,
row_callback=None,
tabulate_formats: List[TabulateTableFormat] = DEFAULT_TABLE_FORMATS,
simple_mode=False,
max_width=200,
max_width_separator=" ",
add_row_numbers=False,
print_result=False,
):
super().__init__(row_callback, tabulate_formats=tabulate_formats)
self.print_result = print_result
self.add_row_numbers = add_row_numbers
self.max_width_separator = max_width_separator
self.max_width = max_width
self.testcase_filters = [] if not testcase_filters else testcase_filters
self.header = header
self.data_type = data_type
self.table_types = table_types
self.out_fmt = out_fmt
self.simple_mode = simple_mode
LOG.info(
f"Testcase filters for data type '{self.data_type}': {[tcf.short_str() for tcf in self.testcase_filters]}"
)
class SummaryGenerator:
jira_crosscheck_headers = ["Known failure?", "Reoccurred failure?"]
matched_testcases_all_header = ["Date", "Subject", "Testcase", "Message ID", "Thread ID"]
matched_testcases_aggregated_header_basic = [
"Testcase",
"TC parameter",
"Frequency of failures",
"Latest failure",
]
matched_testcases_aggregated_header_full = matched_testcases_aggregated_header_basic + jira_crosscheck_headers
def __init__(self, table_renderer):
self.table_renderer = table_renderer
self._callback_dict: Dict[TableOutputFormat, Callable] = {
TableOutputFormat.REGULAR: self._regular_table,
TableOutputFormat.REGULAR_WITH_COLORS: self._colorized_table,
TableOutputFormat.HTML: self._html_table,
}
@classmethod
def process_testcase_filter_results(
cls, tc_filter_results, query_result: ThreadQueryResults, config, output_manager
):
if config.summary_mode != SummaryMode.NONE.value:
# TODO fix
# truncate = self.config.operation_mode == OperationMode.PRINT
truncate = True if config.summary_mode == SummaryMode.TEXT.value else False
# We apply the specified truncation / abbreviation rules only for TEXT based tables
# HTML / Gsheet output is just fine with longer names.
# If SummaryMode.ALL is used, we leave all values intact for simplicity.
if config.abbrev_tc_package or config.truncate_subject_with:
if config.summary_mode in [SummaryMode.ALL.value, SummaryMode.HTML.value]:
LOG.warning(
f"Either abbreviate package or truncate subject is enabled "
f"but SummaryMode is set to '{config.summary_mode}'. "
"Leaving all data intact so truncate / abbreviate options are ignored."
)
config.abbrev_tc_package = None
config.truncate_subject_with = None
data_dict: Dict[TableDataType, Callable[[TestCaseFilter, OutputFormatRules], List[List[str]]]] = {
TableDataType.MATCHED_LINES: lambda tcf, out_fmt: DataConverter.convert_data_to_rows(
tc_filter_results.get_failed_testcases_by_filter(tcf),
out_fmt,
),
TableDataType.MATCHED_LINES_AGGREGATED: lambda tcf, out_fmt: DataConverter.render_aggregated_rows_table(
tc_filter_results.get_aggregated_testcases_by_filter(tcf),
out_fmt,
),
TableDataType.MAIL_SUBJECTS: lambda tcf, out_fmt: DataConverter.convert_email_subjects(query_result),
TableDataType.UNIQUE_MAIL_SUBJECTS: lambda tcf, out_fmt: DataConverter.convert_unique_email_subjects(
query_result
),
TableDataType.LATEST_FAILURES: lambda tcf, out_fmt: DataConverter.render_latest_failures_table(
tc_filter_results.get_latest_failed_testcases_by_filter(tcf)
),
TableDataType.BUILD_COMPARISON: lambda tcf, out_fmt: DataConverter.render_build_comparison_table(
tc_filter_results.get_build_comparison_result_by_filter(tcf)
),
TableDataType.UNKNOWN_FAILURES: lambda tcf, out_fmt: DataConverter.render_aggregated_rows_table(
tc_filter_results.get_aggregated_testcases_by_filter(tcf, filter_unknown=True),
out_fmt,
basic_mode=True,
),
TableDataType.REOCCURRED_FAILURES: lambda tcf, out_fmt: DataConverter.render_aggregated_rows_table(
tc_filter_results.get_aggregated_testcases_by_filter(tcf, filter_reoccurred=True),
out_fmt,
basic_mode=True,
),
TableDataType.TESTCASES_TO_JIRAS: lambda tcf, out_fmt: DataConverter.render_aggregated_rows_table(
tc_filter_results.get_aggregated_testcases_by_filter(tcf), out_fmt
),
}
detailed_render_confs = cls.detailed_render_confs(config, truncate)
short_render_confs = cls.short_render_confs(config, truncate)
detailed_report_files: Dict[SummaryMode, str] = {
SummaryMode.HTML: REPORT_FILE_DETAILED_HTML,
SummaryMode.TEXT: REPORT_FILE_DETAILED_TXT,
}
short_report_files: Dict[SummaryMode, str] = {
SummaryMode.HTML: REPORT_FILE_SHORT_HTML,
SummaryMode.TEXT: REPORT_FILE_SHORT_TXT,
}
cls._render_reports(config, data_dict, output_manager, short_render_confs, short_report_files)
table_renderer = cls._render_reports(
config, data_dict, output_manager, detailed_render_confs, detailed_report_files
)
# These should be written to files regardless of the SummaryMode setting
output_manager.process_rendered_table_data(table_renderer, TableDataType.MAIL_SUBJECTS)
output_manager.process_rendered_table_data(table_renderer, TableDataType.UNIQUE_MAIL_SUBJECTS)
if config.operation_mode == OperationMode.GSHEET:
            # We need to re-generate all the data here, as the table renderer might have rendered truncated data.
LOG.info("Updating Google sheet with data...")
for tcf in config.testcase_filters.get_non_aggregate_filters():
failed_testcases = tc_filter_results.get_failed_testcases_by_filter(tcf)
table_data = DataConverter.convert_data_to_rows(failed_testcases, OutputFormatRules(False, None, None))
SummaryGenerator._write_to_sheet(
config, "data", cls.matched_testcases_all_header, output_manager, table_data, tcf
)
for tcf in config.testcase_filters.get_aggregate_filters():
failed_testcases = tc_filter_results.get_aggregated_testcases_by_filter(tcf)
table_data = DataConverter.render_aggregated_rows_table(
failed_testcases, OutputFormatRules(False, None, None)
)
SummaryGenerator._write_to_sheet(
config,
f"aggregated data for aggregation filter {tcf}",
cls.matched_testcases_aggregated_header_full,
output_manager,
table_data,
tcf,
)
@classmethod
def _render_reports(cls, config, data_dict, output_manager, render_confs, report_files: Dict[SummaryMode, str]):
LOG.debug(f"Rendering reports by configs: {render_confs}.\n" f"Report files: {report_files}")
text_based_report: bool = config.summary_mode in [SummaryMode.TEXT.value, SummaryMode.ALL.value]
html_report: bool = config.summary_mode in [SummaryMode.HTML.value, SummaryMode.ALL.value]
table_renderer = TableRenderer()
for render_conf in render_confs:
table_renderer.render_by_config(render_conf, data_dict[render_conf.data_type])
summary_generator = SummaryGenerator(table_renderer)
if text_based_report:
regular_summary: str = summary_generator.generate_summary(render_confs, TableOutputFormat.REGULAR)
output_manager.process_regular_summary(regular_summary, report_files[SummaryMode.TEXT])
if html_report:
html_summary: str = summary_generator.generate_summary(render_confs, TableOutputFormat.HTML)
output_manager.process_html_summary(html_summary, report_files[SummaryMode.HTML])
return table_renderer
@classmethod
def short_render_confs(cls, config, truncate) -> List[UnitTestResultAggregatorTableRenderingConfig]:
return [
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.BUILD_COMPARISON,
header=["Testcase", "Still failing", "Fixed", "New failure"],
testcase_filters=config.testcase_filters.LATEST_FAILURE_FILTERS,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(truncate, config.abbrev_tc_package, config.truncate_subject_with),
),
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.UNKNOWN_FAILURES,
testcase_filters=config.testcase_filters.get_match_expression_aggregate_filters(),
header=cls.matched_testcases_aggregated_header_basic,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(False, config.abbrev_tc_package, None),
),
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.REOCCURRED_FAILURES,
testcase_filters=config.testcase_filters.get_match_expression_aggregate_filters(),
header=cls.matched_testcases_aggregated_header_basic,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(False, config.abbrev_tc_package, None),
),
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.TESTCASES_TO_JIRAS,
testcase_filters=config.testcase_filters.get_match_expression_aggregate_filters(),
header=cls.matched_testcases_aggregated_header_full,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(False, config.abbrev_tc_package, None),
),
]
@classmethod
def detailed_render_confs(cls, config, truncate) -> List[UnitTestResultAggregatorTableRenderingConfig]:
# Render tables in 2 steps
# EXAMPLE SCENARIO / CONFIG:
# match_expression #1 = 'YARN::org.apache.hadoop.yarn', pattern='.*org\\.apache\\.hadoop\\.yarn.*')
# match_expression #2 = 'MR::org.apache.hadoop.mapreduce', pattern='.*org\\.apache\\.hadoop\\.mapreduce.*')
# Aggregation filter #1 = CDPD-7.x
# Aggregation filter #2 = CDPD-7.1.x
# Note: Step numbers are in parentheses
# Failed testcases_ALL --> Global all (1)
# Failed testcases_YARN_ALL (1)
# Failed testcases_MR_ALL (1)
# Failed testcases_YARN_Aggregated_CDPD-7.1.x (2)
# Failed testcases_YARN_Aggregated_CDPD-7.x (2)
# Failed testcases_MR_Aggregated_CDPD-7.1.x (2)
# Failed testcases_MR_Aggregated_CDPD-7.x (2)
return [
# Render tables for all match expressions + ALL values
# --> 3 tables in case of 2 match expressions
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.MATCHED_LINES,
testcase_filters=config.testcase_filters.get_non_aggregate_filters(),
header=cls.matched_testcases_all_header,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(truncate, config.abbrev_tc_package, config.truncate_subject_with),
),
# Render tables for all match expressions AND all aggregation filters
# --> 4 tables in case of 2 match expressions and 2 aggregate filters
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.MATCHED_LINES_AGGREGATED,
testcase_filters=config.testcase_filters.get_aggregate_filters(),
header=cls.matched_testcases_aggregated_header_full,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(False, config.abbrev_tc_package, None),
),
UnitTestResultAggregatorTableRenderingConfig(
simple_mode=True,
header=["Subject", "Thread ID"],
data_type=TableDataType.MAIL_SUBJECTS,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
testcase_filters=None,
out_fmt=None,
),
UnitTestResultAggregatorTableRenderingConfig(
simple_mode=True,
header=["Subject"],
data_type=TableDataType.UNIQUE_MAIL_SUBJECTS,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
testcase_filters=None,
out_fmt=None,
),
UnitTestResultAggregatorTableRenderingConfig(
data_type=TableDataType.LATEST_FAILURES,
header=["Testcase", "Failure date", "Subject"],
testcase_filters=config.testcase_filters.LATEST_FAILURE_FILTERS,
table_types=[TableOutputFormat.REGULAR, TableOutputFormat.HTML],
out_fmt=OutputFormatRules(truncate, config.abbrev_tc_package, config.truncate_subject_with),
),
] + SummaryGenerator.short_render_confs(config, truncate)
@staticmethod
def _write_to_sheet(config, data_descriptor, header, output_manager, table_data, tcf):
worksheet_name: str = config.get_worksheet_name(tcf)
LOG.info(
f"Writing GSheet {data_descriptor}. "
f"Worksheet name: {worksheet_name}, "
f"Number of lines will be written: {len(table_data)}"
)
output_manager.update_gsheet(header, table_data, worksheet_name=worksheet_name, create_not_existing=True)
def _regular_table(self, dt: TableDataType, alias=None):
rendered_tables = self.table_renderer.get_tables(
dt, table_fmt=TabulateTableFormat.GRID, colorized=False, alias=alias
)
self._ensure_one_table_found(rendered_tables, dt)
return rendered_tables[0]
def _colorized_table(self, dt: TableDataType, alias=None):
rendered_tables = self.table_renderer.get_tables(
dt, table_fmt=TabulateTableFormat.GRID, colorized=True, alias=alias
)
self._ensure_one_table_found(rendered_tables, dt)
return rendered_tables[0]
def _html_table(self, dt: TableDataType, alias=None):
rendered_tables = self.table_renderer.get_tables(
dt, table_fmt=TabulateTableFormat.HTML, colorized=False, alias=alias
)
self._ensure_one_table_found(rendered_tables, dt)
return rendered_tables[0]
@staticmethod
def _ensure_one_table_found(tables: Sized, dt: TableDataType):
if not tables:
raise ValueError(f"Rendered table not found for Table data type: {dt}")
if len(tables) > 1:
raise ValueError(
f"Multiple result tables are found for table data type: {dt}. "
f"Should have found exactly one table per type."
)
def generate_summary(
self, render_confs: List[UnitTestResultAggregatorTableRenderingConfig], table_output_format: TableOutputFormat
) -> str:
tables: List[GenericTableWithHeader] = []
for conf in render_confs:
for tcf in conf.testcase_filters:
alias = get_key_by_testcase_filter(tcf)
rendered_table = self._callback_dict[table_output_format](conf.data_type, alias=alias)
tables.append(rendered_table)
if conf.simple_mode:
rendered_table = self._callback_dict[table_output_format](conf.data_type, alias=None)
tables.append(rendered_table)
if table_output_format in [TableOutputFormat.REGULAR, TableOutputFormat.REGULAR_WITH_COLORS]:
return self._generate_final_concat_of_tables(tables)
elif table_output_format in [TableOutputFormat.HTML]:
return self._generate_final_concat_of_tables_html(tables)
else:
raise ValueError(f"Invalid state! Table type is not in any of: {[t for t in TableOutputFormat]}")
@staticmethod
def _generate_final_concat_of_tables(tables) -> str:
printable_summary_str: str = ""
for table in tables:
printable_summary_str += str(table)
printable_summary_str += "\n\n"
return printable_summary_str
@staticmethod
def _generate_final_concat_of_tables_html(tables) -> str:
table_tuples = [(ht.header, ht.table) for ht in tables]
html_sep = HtmlGenerator.generate_separator(tag="hr", breaks=2)
return (
HtmlGenerator()
.begin_html_tag()
.add_basic_table_style()
.append_html_tables(
table_tuples, separator=html_sep, header_type="h1", additional_separator_at_beginning=True
)
.render()
)
# TODO Try to extract this to common class (pythoncommons?), BranchComparator should move to this implementation later.
class TableRenderer:
def __init__(self):
self._tables: Dict[str, List[GenericTableWithHeader]] = {}
def render_by_config(
self,
conf: UnitTestResultAggregatorTableRenderingConfig,
data_callable: Callable[[TestCaseFilter or None, OutputFormatRules], List[List[str]]],
):
if conf.simple_mode:
self._render_tables(
header=conf.header,
data=data_callable(None, conf.out_fmt),
dtype=conf.data_type,
formats=conf.tabulate_formats,
)
for tcf in conf.testcase_filters:
key = get_key_by_testcase_filter(tcf)
self._render_tables(
header=conf.header,
data=data_callable(tcf, conf.out_fmt),
dtype=conf.data_type,
formats=conf.tabulate_formats,
append_to_header_title=f"_{key}",
table_alias=key,
)
def _render_tables(
self,
header: List[str],
data: List[List[str]],
dtype: TableDataType,
formats: List[TabulateTableFormat],
colorized=False,
table_alias=None,
append_to_header_title=None,
raise_error_if_header_vs_data_len_mismatched=True,
) -> Dict[TabulateTableFormat, GenericTableWithHeader]:
if not formats:
raise ValueError("Formats should not be empty!")
if raise_error_if_header_vs_data_len_mismatched:
if data and len(header) != len(data[0]):
                raise ValueError(
                    "Mismatch in length of header columns and data columns. "
                    f"Header: {header}, "
                    f"First row of data table: {data[0]}"
)
render_conf = UnitTestResultAggregatorTableRenderingConfig(
row_callback=lambda row: row,
| |
- ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return s
def func_aafe1dd3eefe46588c6a04ec53f17d6d(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return N
def func_2b80f44fc0fe4599b0f29f82b7d58dfe(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return r
def func_8136c2c855054178a0e74d642193371d(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return D
def func_be066bc3e2f24db4a8cff98e27d9205e(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return q
def func_419aa477399c4133959e23c15bc75550(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return t
def func_e6e276ba5e774b4aaf0e4c447b4fb7dc(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return i
def func_f9c03cc188834c0bac4b556e10d984f3(infile):
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return b
def func_d32823bcf0f54f5aa5fc66c6480f7e40():
infile = open('codejam/test_files/Y14R5P1/A.in')
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return t
def func_fd450b8e8bbe4256a2fee67704a8d691():
infile = open('codejam/test_files/Y14R5P1/A.in')
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return ans
def func_4ccf53b13e6b4652a8672d867b0d2e76():
infile = open('codejam/test_files/Y14R5P1/A.in')
for test in range(int(infile.readline())):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
print 'Case #%s: %.16f' % (test + 1, ans)
infile.close()
return A
def func_8ffc3bde3c2a4590b3568d35be214df7():
infile = open('codejam/test_files/Y14R5P1/A.in')
for | |
kmeans.cluster_centers_
else:
return None, None
def _log_interesting_stats(self, stats):
"""
# Provide interesting insights about the data to the user and send them to the logging server in order for it to generate charts
:param stats: The stats extracted up until this point for all columns
"""
for col_name in stats:
col_stats = stats[col_name]
# Overall quality
if col_stats['quality_score'] < 6:
# Some scores are not that useful on their own, so we should only warn users about them if overall quality is bad.
                self.log.warning('Column "{}" is considered of low quality, the scores that influenced this decision will be listed below'.format(col_name))
if 'duplicates_score' in col_stats and col_stats['duplicates_score'] < 6:
duplicates_percentage = col_stats['duplicates_percentage']
w = f'{duplicates_percentage}% of the values in column {col_name} seem to be repeated, this might indicate that your data is of poor quality.'
self.log.warning(w)
col_stats['duplicates_score_warning'] = w
else:
col_stats['duplicates_score_warning'] = None
else:
col_stats['duplicates_score_warning'] = None
#Compound scores
if col_stats['consistency_score'] < 3:
w = f'The values in column {col_name} rate poorly in terms of consistency. This means that the data has too many empty values, values with a hard to determine type and duplicate values. Please see the detailed logs below for more info'
self.log.warning(w)
col_stats['consistency_score_warning'] = w
else:
col_stats['consistency_score_warning'] = None
if col_stats['redundancy_score'] < 5:
                w = f'The data in the column {col_name} is likely somewhat redundant, any insight it can give us can already be deduced from your other columns. Please see the detailed logs below for more info'
self.log.warning(w)
col_stats['redundancy_score_warning'] = w
else:
col_stats['redundancy_score_warning'] = None
if col_stats['variability_score'] < 6:
w = f'The data in the column {col_name} seems to contain too much noise/randomness based on the value variability. That is to say, the data is too unevenly distributed and has too many outliers. Please see the detailed logs below for more info.'
self.log.warning(w)
col_stats['variability_score_warning'] = w
else:
col_stats['variability_score_warning'] = None
            # Some scores are meaningful on their own, and the user should be warned if they fall below a certain threshold
if col_stats['empty_cells_score'] < 8:
empty_cells_percentage = col_stats['empty_percentage']
w = f'{empty_cells_percentage}% of the values in column {col_name} are empty, this might indicate that your data is of poor quality.'
self.log.warning(w)
col_stats['empty_cells_score_warning'] = w
else:
col_stats['empty_cells_score_warning'] = None
if col_stats['data_type_distribution_score'] < 7:
#self.log.infoChart(stats[col_name]['data_type_dist'], type='list', uid='Dubious Data Type Distribution for column "{}"'.format(col_name))
percentage_of_data_not_of_principal_type = col_stats['data_type_distribution_score'] * 100
principal_data_type = col_stats['data_type']
w = f'{percentage_of_data_not_of_principal_type}% of your data is not of type {principal_data_type}, which was detected to be the data type for column {col_name}, this might indicate that your data is of poor quality.'
self.log.warning(w)
col_stats['data_type_distribution_score_warning'] = w
else:
col_stats['data_type_distribution_score_warning'] = None
if 'z_test_based_outlier_score' in col_stats and col_stats['z_test_based_outlier_score'] < 6:
percentage_of_outliers = col_stats['z_test_based_outlier_score']*100
w = f"""Column {col_name} has a very high amount of outliers, {percentage_of_outliers}% of your data is more than 3 standard deviations away from the mean, this means that there might
be too much randomness in this column for us to make an accurate prediction based on it."""
self.log.warning(w)
col_stats['z_test_based_outlier_score_warning'] = w
else:
col_stats['z_test_based_outlier_score_warning'] = None
if 'lof_based_outlier_score' in col_stats and col_stats['lof_based_outlier_score'] < 4:
percentage_of_outliers = col_stats['percentage_of_log_based_outliers']
w = f"""Column {col_name} has a very high amount of outliers, {percentage_of_outliers}% of your data doesn't fit closely in any cluster using the KNN algorithm (20n) to cluster your data, this means that there might
be too much randomness in this column for us to make an accurate prediction based on it."""
self.log.warning(w)
col_stats['lof_based_outlier_score_warning'] = w
else:
col_stats['lof_based_outlier_score_warning'] = None
if col_stats['value_distribution_score'] < 3:
max_probability_key = col_stats['max_probability_key']
w = f"""Column {col_name} is very biased towards the value {max_probability_key}, please make sure that the data in this column is correct !"""
self.log.warning(w)
col_stats['value_distribution_score_warning'] = w
else:
col_stats['value_distribution_score_warning'] = None
if col_stats['similarity_score'] < 6:
similar_percentage = col_stats['max_similarity'] * 100
similar_col_name = col_stats['most_similar_column_name']
w = f'Column {col_name} and {similar_col_name} are {similar_percentage}% the same, please make sure these represent two distinct features of your data !'
self.log.warning(w)
col_stats['similarity_score_warning'] = w
else:
col_stats['similarity_score_warning'] = None
'''
if col_stats['correlation_score'] < 5:
not_quite_correlation_percentage = col_stats['correlation_score'] * 100
most_correlated_column = col_stats['most_correlated_column']
                self.log.warning(f"""Using a statistical predictor we\'ve discovered a correlation of roughly {not_quite_correlation_percentage}% between column
{col_name} and column {most_correlated_column}""")
'''
# We might want to inform the user about a few stats regarding his column regardless of the score, this is done below
self.log.info('Data distribution for column "{}"'.format(col_name))
self.log.infoChart(stats[col_name]['data_subtype_dist'], type='list', uid='Data Type Distribution for column "{}"'.format(col_name))
def run(self, input_data, modify_light_metadata, hmd=None, print_logs=True):
"""
# Runs the stats generation phase
# This shouldn't alter the columns themselves, but rather provide the `stats` metadata object and update the types for each column
# A lot of information about the data distribution and quality will also be logged to the server in this phase
"""
''' @TODO Uncomment when we need multiprocessing, possibly disable on OSX
no_processes = multiprocessing.cpu_count() - 2
if no_processes < 1:
no_processes = 1
pool = multiprocessing.Pool(processes=no_processes)
'''
if print_logs == False:
self.log = logging.getLogger('null-logger')
self.log.propagate = False
        # we don't need to generate statistics over all of the data, so we subsample based on our accepted margin of error
population_size = len(input_data.data_frame)
if population_size < 50:
sample_size = population_size
else:
sample_size = int(calculate_sample_size(population_size=population_size, margin_error=self.transaction.lmd['sample_margin_of_error'], confidence_level=self.transaction.lmd['sample_confidence_level']))
#if sample_size > 3000 and sample_size > population_size/8:
# sample_size = min(round(population_size/8),3000)
# get the indexes of randomly selected rows given the population size
input_data_sample_indexes = random.sample(range(population_size), sample_size)
self.log.info('population_size={population_size}, sample_size={sample_size} {percent:.2f}%'.format(population_size=population_size, sample_size=sample_size, percent=(sample_size/population_size)*100))
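        # Worked example (assuming calculate_sample_size() implements Cochran's formula with a
        # finite-population correction): for population_size=100000, margin_error=0.05 and
        # confidence_level=0.95, n0 = 1.96**2 * 0.25 / 0.05**2 ~= 384, and the correction
        # n0 / (1 + (n0 - 1) / N) brings that down to roughly 383 sampled rows.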
all_sampled_data = input_data.data_frame.iloc[input_data_sample_indexes]
stats = {}
col_data_dict = {}
        for i, col_name in enumerate(all_sampled_data.columns.values):
col_data = all_sampled_data[col_name].dropna()
full_col_data = all_sampled_data[col_name]
data_type, curr_data_subtype, data_type_dist, data_subtype_dist, additional_info, column_status = self._get_column_data_type(col_data, input_data.data_frame, col_name)
if column_status == 'Column empty':
if modify_light_metadata:
self.transaction.lmd['malformed_columns']['names'].append(col_name)
self.transaction.lmd['malformed_columns']['indices'].append(i)
continue
new_col_data = []
if curr_data_subtype == DATA_SUBTYPES.TIMESTAMP: #data_type == DATA_TYPES.DATE:
for element in col_data:
if str(element) in [str(''), str(None), str(False), str(np.nan), 'NaN', 'nan', 'NA', 'null']:
new_col_data.append(None)
else:
try:
new_col_data.append(int(parse_datetime(element).timestamp()))
except:
self.log.warning(f'Could not convert string from col "{col_name}" to date and it was expected, instead got: {element}')
new_col_data.append(None)
col_data = new_col_data
if data_type == DATA_TYPES.NUMERIC or curr_data_subtype == DATA_SUBTYPES.TIMESTAMP:
histogram, _ = StatsGenerator.get_histogram(col_data, data_type=data_type, data_subtype=curr_data_subtype)
x = histogram['x']
y = histogram['y']
col_data = StatsGenerator.clean_int_and_date_data(col_data)
# This means the column is all nulls, which we don't handle at the moment
if len(col_data) < 1:
return None
xp = []
if len(col_data) > 0:
max_value = max(col_data)
min_value = min(col_data)
mean = np.mean(col_data)
median = np.median(col_data)
var = np.var(col_data)
skew = st.skew(col_data)
kurtosis = st.kurtosis(col_data)
inc_rate = 0.1
initial_step_size = abs(max_value-min_value)/100
xp += [min_value]
i = min_value + initial_step_size
while i < max_value:
xp += [i]
i_inc = abs(i-min_value)*inc_rate
i = i + i_inc
else:
max_value = 0
min_value = 0
mean = 0
median = 0
var = 0
skew = 0
kurtosis = 0
xp = []
is_float = True if max([1 if int(i) != i else 0 for i in col_data]) == 1 else False
col_stats = {
'data_type': data_type,
'data_subtype': curr_data_subtype,
"mean": mean,
"median": median,
"variance": var,
"skewness": skew,
"kurtosis": kurtosis,
"max": max_value,
"min": min_value,
"is_float": is_float,
"histogram": {
"x": x,
"y": y
},
"percentage_buckets": xp
}
elif data_type == DATA_TYPES.CATEGORICAL or curr_data_subtype == DATA_SUBTYPES.DATE:
histogram, _ = StatsGenerator.get_histogram(input_data.data_frame[col_name], data_type=data_type, data_subtype=curr_data_subtype)
col_stats = {
'data_type': data_type,
'data_subtype': curr_data_subtype,
"histogram": histogram,
"percentage_buckets": histogram['x']
}
elif curr_data_subtype == DATA_SUBTYPES.IMAGE:
histogram, percentage_buckets = StatsGenerator.get_histogram(col_data, data_subtype=curr_data_subtype)
col_stats = {
'data_type': data_type,
'data_subtype': curr_data_subtype,
'percentage_buckets': percentage_buckets,
'histogram': histogram
}
# @TODO This is probably wrong, look into it a bit later
else:
# see if its a sentence or a word
histogram, _ = StatsGenerator.get_histogram(col_data, data_type=data_type, data_subtype=curr_data_subtype)
dictionary = list(histogram.keys())
# if no words, then no dictionary
if len(col_data) == 0:
dictionary_available = False
dictionary_lenght_percentage = 0
dictionary = []
else:
dictionary_available = True
dictionary_lenght_percentage = len(
dictionary) / len(col_data) * 100
# if the number of uniques is too large then treat is a text
is_full_text = True if curr_data_subtype == DATA_SUBTYPES.TEXT else False
if dictionary_lenght_percentage > 10 and len(col_data) > 50 and is_full_text==False:
| |
@param **kw
Further keyword arguments are used in case of
``algo==proper_x_dist`` and are supplied to the x_distance()
method calls.
"""
if algo == 'circumference':
f = self.circumference
elif algo in ('coord', 'proper_x_dist'):
f = lambda param: self(param)[0]
else:
raise ValueError("Unknown algorithm: %s" % algo)
last_param = [None]
def func(param):
param = clip(param, 0, np.pi)
value = f(param)
last_param[0] = param
return value
func_neg = lambda param: -f(clip(param, 0, np.pi))
with self.fix_evaluator():
xa, xb, xc = bracket(func_neg, 0.0, 0.1, grow_limit=2)[:3]
res = minimize_scalar(func_neg, bracket=(xa, xb, xc), options=dict(xtol=1e-1))
x_max = res.x
try:
xa, xb, xc = bracket(func, x_max, x_max+0.1, grow_limit=2)[:3]
if xb < 0 or xc < 0:
# Something went wrong in bracketing. Do it half-manually now.
# This case occurs if the MOTS is *too little* deformed
# such that the "neck" is not very pronounced.
params = self.collocation_points()
xs = [f(x) for x in params]
max1 = next(params[i] for i, x in enumerate(xs)
if xs[i+1] < x)
max2 = next(params[i] for i, x in reversed(list(enumerate(xs)))
if xs[i-1] < x)
xa, xb, xc = bracket(func, max1, max1+0.1*(max2-max1), grow_limit=2)[:3]
except RuntimeError:
# This happens in bipolar coordinates in extremely distorted
                # cases where the "neck" is stretched over a large parameter
# interval.
x0 = last_param[0]
xa, xb, xc = bracket(func, x0, x0+0.1, grow_limit=5, maxiter=10000)[:3]
if algo == 'proper_x_dist':
func = lambda param: self.x_distance(param, **kw)
xa, xb, xc = bracket(func, xb, xb+1e-2, grow_limit=2)[:3]
res = minimize_scalar(func, bracket=(xa, xb, xc), options=dict(xtol=xtol))
else:
res = minimize_scalar(func, bracket=(xa, xb, xc), options=dict(xtol=xtol))
return res.x, res.fun
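    # Usage sketch for find_neck() (with a hypothetical `curve` instance of this class):
    # 'coord' locates the local minimum of the x-coordinate along the curve, while
    # 'proper_x_dist' refines that minimum using x_distance(); both return the parameter
    # value and the minimized quantity, e.g.
    #     neck_param, neck_x = curve.find_neck(algo='coord')
    #     neck_param, neck_dist = curve.find_neck(algo='proper_x_dist')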
def locate_intersection(self, other_curve, xtol=1e-8, domain1=(0, np.pi),
domain2=(0, np.pi), strict1=True, strict2=True,
N1=20, N2=20):
r"""Locate one point at which this curve intersects another curve.
@return Tuple ``(param1, param2)``, where ``param1`` is the parameter
value of this curve and ``param2`` the parameter value of the
`other_curve` at which the two curves have the same location. If
no intersection is found, returns ``(None, None)``.
@param other_curve
Curve to find the intersection with.
@param xtol
Tolerance in curve parameter values for the search. Default is
`1e-8`.
@param domain1,domain2
Optional interval of the curves to consider. Default is the full
curve, i.e. ``(0, pi)`` for both.
@param strict1,strict2
Whether to only allow solutions in the given domains `domain1`,
`domain2`, respectively (default). If either is `False`, the
respective domain is used just for the initial coarse check to
find a starting point. Setting e.g. ``N1=1`` and ``strict1=False``
allows specifying a starting point on this (first) curve.
@param N1,N2
Number of equally spaced rough samples to check for a good
starting point. To avoid running into some local minimal distance
(e.g. at the curve ends), this number should be high enough.
Alternatively (or additionally), one may specify a smaller
domain if there is prior knowledge about the curve shapes.
"""
z_dist = self.z_distance_using_metric(
metric=None, other_curve=other_curve, allow_intersection=True,
)
if z_dist >= 0:
return (None, None)
c1 = self
c2 = other_curve
with c1.fix_evaluator(), c2.fix_evaluator():
space1 = np.linspace(*domain1, N1, endpoint=False)
space1 += (space1[1] - space1[0]) / 2.0
space2 = np.linspace(*domain2, N2, endpoint=False)
space2 += (space2[1] - space2[0]) / 2.0
pts1 = [[c1(la), la] for la in space1]
pts2 = [[c2(la), la] for la in space2]
dists = [[np.linalg.norm(np.asarray(p1)-p2), l1, l2]
for p1, l1 in pts1 for p2, l2 in pts2]
_, la1, la2 = min(dists, key=lambda d: d[0])
dyn_domain1 = domain1 if strict1 else (0.0, np.pi)
dyn_domain2 = domain2 if strict2 else (0.0, np.pi)
def func(x):
la1, la2 = x
p1 = np.asarray(c1(clip(la1, *dyn_domain1)))
p2 = np.asarray(c2(clip(la2, *dyn_domain2)))
return p2 - p1
sol = root(func, x0=[la1, la2], tol=xtol)
if not sol.success:
return (None, None)
la1, la2 = sol.x
if strict1:
la1 = clip(la1, *domain1)
if strict2:
la2 = clip(la2, *domain2)
if np.linalg.norm(func((la1, la2))) > np.sqrt(xtol):
# points are too far apart to be an intersection
return (None, None)
return (la1, la2)
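    # Example (sketch): check whether this MOTS intersects `other` in the upper
    # halves of both curves. `curve` and `other` are placeholders for two
    # constructed curve objects of this class.
    #
    #     la1, la2 = curve.locate_intersection(other, domain1=(0, np.pi/2),
    #                                          domain2=(0, np.pi/2))
    #     if la1 is not None:
    #         p1 = curve(la1)  # coincides with other(la2) up to ~sqrt(xtol)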
def locate_self_intersection(self, neck=None, xtol=1e-8):
r"""Locate a *loop* in the MOTS around its neck.
@return Two parameter values ``(param1, param2)`` where the curve has
            the same location in the x-z-plane. If no loop is found, returns
``(None, None)``.
@param neck
Parameter where the neck is located. If not given, finds the neck
using default arguments of find_neck().
@param xtol
Tolerance in curve parameter values for the search. Default is
`1e-8`.
"""
try:
return self._locate_self_intersection(neck=neck, xtol=xtol)
        except _IntersectionDetectionError:
            # No loop was found; fall through and return (None, None) as
            # documented above.
            pass
        return None, None
def _locate_self_intersection(self, neck, xtol):
r"""Implements locate_self_intersection()."""
if neck is None:
neck = self.find_neck()[0]
with self.fix_evaluator():
match_cache = dict()
def find_matching_param(la1, max_step=None, _recurse=2):
try:
return match_cache[la1]
except KeyError:
pass
if abs(la1-neck) < xtol:
return la1
x1 = self(la1)[0]
def f(la): # x-coord difference
return self(la)[0] - x1
dl = neck - la1
if max_step:
dl = min(dl, max_step)
a = neck + dl
while f(a) > 0:
dl = dl/2.0
a = neck + dl
if abs(dl) < xtol:
return la1
step = min(dl/2.0, max_step) if max_step else (dl/2.0)
b = a + step
while f(b) < 0:
a, b = b, b+step
if b >= np.pi:
if _recurse > 0:
max_step = (max_step/10.0) if max_step else 0.05
return find_matching_param(la1, max_step=max_step,
_recurse=_recurse-1)
# Last resort: seems the x-coordinate is not reached
# on this branch at all. Returning pi guarantees a
# large `delta_z` (albeit with a jump that might be
# problematic). In practice, however, this often works.
return np.pi
# We now have a <= la2 <= b (la2 == matching param)
la2 = brentq(f, a=a, b=b, xtol=xtol)
match_cache[la1] = la2
return la2
def delta_z(la):
la2 = find_matching_param(la)
return self(la)[1] - self(la2)[1]
for step in np.linspace(0.01, np.pi-neck, 100):
# Note that `step>0`, but the minimum lies at `a<neck` the way
# we have defined `delta_z()`. The minimize call will however
# happily turn around...
res = minimize_scalar(delta_z, bracket=(neck, neck+step),
options=dict(xtol=1e-2))
a = res.x
if delta_z(a) < 0:
break
if delta_z(a) >= 0:
raise _IntersectionDetectionError("probably no intersection")
# We start with `shrink=2.0` to make this fully compatible with
# the original strategy (i.e. so that results are reproducible).
# In practice, however, this `step` might be larger than the
# distance to the domain boundaries, making the following
# `brent()` call stop immediately at the wrong point. The cause is
# that, first of all we assume here `a>neck`, but our definition
# of `delta_z()` leads to a minimum at `a<neck`. Secondly, with
# extreme parameterizations (e.g. using bipolar coordinates plus
# `curv2` coordinate space reparameterization), the `neck` region
# could house the vast majority of affine parameters.
step_shrinks = [2.0]
step_shrinks.extend(np.linspace(
(neck-a)/((1-0.9)*a), max(10.0, (neck-a)/((1-0.1)*a)), 10
))
# For the above:
# a + step = x*a, where 0<x<1, step<0
# <=> (a-neck)/shrink = (x-1)*a
# <=> shrink = (a-neck)/(a*(x-1))
# = (neck-a)/(a*(1-x)) > 0
for shrink in step_shrinks:
step = (a-neck)/shrink
b = a + step
while delta_z(b) < 0:
a, b = b, b+step
if not 0 <= b <= np.pi:
raise _IntersectionDetectionError("curve upside down...")
la1 = brentq(delta_z, a=a, b=b, xtol=xtol)
if la1 > 0.0:
break
la2 = find_matching_param(la1)
return la1, la2
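    # Example (sketch): detect a self-intersection loop around the neck; here
    # `curve` stands for a constructed curve object of this class.
    #
    #     la1, la2 = curve.locate_self_intersection()
    #     if la1 is not None:
    #         p1, p2 = curve(la1), curve(la2)  # same point in the x-z-plane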
def get_distance_function(self, other_curve, Ns=None, xatol=1e-12,
mp_finish=True, dps=50, minima_to_check=1):
r"""Return a callable computing the distance between this and another curve.
The computed distance is the coordinate distance between a point of
this curve to the closest point on another curve.
The returned callable will take one mandatory argument: the parameter
at which to evaluate the current curve. The resulting point is then
taken and the given `other_curve` searched for the point closest to
that point. The distance to this function is then returned.
A second optional parameter of the returned function determines
whether only the distance (`False`, default) or the distance and the
parameter on the other curve is returned (if `True`).
@param other_curve
The curve to which the distance should be computed in the returned
function.
@param Ns
Number of points to take on `other_curve` for finding the initial
            guess for the minimum search.
#!/usr/bin/env python
"""
Copyright (c) 2016, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
import re
import os
import glob
import ads
import codecs
import argparse
import subprocess
parser = argparse.ArgumentParser()
parser.add_argument('infiles', type=str, nargs='*',
help='List of input PDF file(s)')
parser.add_argument('-catbib', '--catbib', type=str,
help='Name of master bibliography file written')
parser.add_argument('-adstoken', '--adstoken', type=str,
help='ADS token to use. (Creates file ads.token).')
args = parser.parse_args()
class BibtexEntry(object):
"""
Bibtex entry class
"""
def __init__(self, entry):
self.lines = entry # entry is a list of bibtex lines
self.doi = u''
self.bibcode = u''
self.get_doi()
self.get_bibcode()
def __repr__(self):
return self.bibcode
def get_doi(self):
"""
sets BibtexEntry.doi = DOI for this BibtexEntry
"""
re_doi = u'\s*doi\s*=\s*\{(.*)\}.*'
self.doi = self.search_re_lines(re_doi)
def get_bibcode(self):
"""
sets BibtexEntry.bibcode = bibcode for this BibtexEntry
"""
re_bibcode = u'@[^\{]*\{([^,]*),.*'
self.bibcode = self.search_re_lines(re_bibcode)
def search_re_lines(self, regexp):
"""
Searches self.lines for re, returning group(1)
of the match, if found. Otherwise returns an empty string.
"""
rec = re.compile(regexp, re.IGNORECASE)
for l in self.lines:
rem = rec.match(l)
if rem:
return rem.group(1)
else:
return ''
class BibtexCollection(object):
"""
Holds a set of bibtex files, each of which can have multiple
entries
"""
def __init__(self):
self.bib_files = {} # filename is key, list of BibtexEntry is value
self.bibcode_entries = {} # Dictionary of BibtexEntry objects keyed by bibcode
def read_from_string(self, bibtex_string):
        # Re-append the newlines stripped by split() so each list element is a
        # complete bibtex line, as expected by read_from_lines().
        bibtex_lines = [bl + u'\n' for bl in bibtex_string.split(u'\n')]
        self.read_from_lines(bibtex_lines)
def read_from_lines(self, bibtexlines):
self.bib_files["bibtexlines"] = self.get_entries(bibtexlines)
self.make_unique_entries()
def read_from_files(self, bibtexfiles):
"""
Make a BibtexCollection given a list of input files.
"""
for f in bibtexfiles:
# Open and read f
fbib = codecs.open(f, encoding='utf-8')
lines = []
for l in fbib:
lines.append(l)
fbib.close()
# Turn lines into a list of BibtexEntry objects
self.bib_files[f] = self.get_entries(lines)
print('Found {} bibtex files'.format(len(self.bib_files)))
# Gets a unique set of BibtexEntry objects by bibcode
self.make_unique_entries()
print('Found {} unique bibtex entries'.format(len(self.bibcode_entries)))
print(self.bibcode_entries)
def write_unique_entries(self, outfile):
"""
Writes unique entries into a master Bibtex file
"""
self.outfile = outfile
f = codecs.open(self.outfile, encoding='utf-8', mode='w')
for bc, entry in self.bibcode_entries.iteritems():
for l in entry.lines:
f.write(l)
f.write(u'\n\n')
f.close()
def make_unique_entries(self):
"""
Makes a list of unique BibtexEntry objects keyed by bibcode
"""
for f, belist in self.bib_files.iteritems():
for be in belist:
if be.bibcode in self.bibcode_entries.keys():
                    print('Duplicate bibcode {} - Continuing with replacement ...'.format(be.bibcode))
self.bibcode_entries[be.bibcode] = be
def get_entries(self, lines):
"""
Turns a list of lines into a list of BibtexEntry objects
"""
entries = []
for bibentry in self.gen_bib_entries(lines):
entries.append(BibtexEntry(bibentry))
return entries
def gen_bib_entries(self, line_list):
"""
Yields each entry in list of unicode bibtex lines
"""
re_entry_start = re.compile(u'@.*')
re_entry_end = re.compile(u'\}')
found_entry = False
for l in line_list:
m_entry_start = re_entry_start.match(l)
if m_entry_start:
found_entry = True
entry = []
if found_entry:
entry.append(l)
m_entry_end = re_entry_end.match(l)
if m_entry_end:
found_entry = False
yield entry
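# Illustrative sketch (the file names are hypothetical): merge several bibtex
# files and write the unique entries, keyed by bibcode, into one master file.
def _demo_merge_bibtex(files=('refs1.bib', 'refs2.bib'), outfile='master.bib'):
    bibs = BibtexCollection()
    bibs.read_from_files(list(files))
    bibs.write_unique_entries(outfile)
    return bibs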
class Document(object):
"""
Class for a document (eg. pdf or ps article)
"""
def __init__(self, file):
"""
Initializes Document using filename 'file'
"""
self.name = file
self.doi = ''
self.arxiv = ''
self.paper = None
self.bibcode = None
self.bibtex = None
# Try to get DOI
self.get_doi()
if not self.doi:
# Try to get arXiv number if no DOI
self.get_arxiv()
# Get bibcode for this paper
if self.doi:
self.query_ads_bibcode({'identifier':self.doi})
elif self.arxiv:
self.query_ads_bibcode({'identifier':self.arxiv})
else:
print('Cannot find {} in ADS with no identifier.'.format(self.name))
def get_doi(self):
"""
Gets DOI identifier from the file.
Sets: self.doi
"""
pdfgrep_doi_re = "doi\s*:\s*[^ \"'\n'\"]*"
pdfgrep_stdout = self.call_pdfgrep(pdfgrep_doi_re)
re_doi = re.compile('(\s*)doi(\s*):(\s*)([^\s\n]*)', re.IGNORECASE)
m = re_doi.match(pdfgrep_stdout)
if m:
self.doi = m.group(4)
if self.doi:
print('Found DOI {} in {}'.format(self.doi, self.name))
else:
print('Could not find DOI in {}'.format(self.name))
def get_arxiv(self):
"""
Gets arXiv identifier from the file.
Sets: self.arxiv
"""
pdfgrep_arx_re = "arXiv:[0-9\.]+v?[0-9]* \[[a-zA-Z-\.]+\] [0-9]{1,2} [a-zA-Z]+ [0-9]{4}"
pdfgrep_stdout = self.call_pdfgrep(pdfgrep_arx_re)
re_arx = re.compile('(arXiv:[0-9\.]+).*', re.IGNORECASE)
m_arxiv = re_arx.match(pdfgrep_stdout)
if m_arxiv:
self.arxiv = m_arxiv.group(1)
if self.arxiv:
print('Found arXiv ID {} in {}'.format(self.arxiv, self.name))
else:
print('Could not find arXiv ID in {}'.format(self.name))
def call_pdfgrep(self, pdfgrep_re):
"""
Calls pdfgrep with regular expression pdfgrep_re (case insensitive)
        Returns pdfgrep's STDOUT; exits the program if pdfgrep writes to STDERR
"""
pdfgrep_call = subprocess.Popen(["pdfgrep", "-ioP",
pdfgrep_re,
self.name],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
pdfgrep_stdout, pdfgrep_err = pdfgrep_call.communicate()
if pdfgrep_err:
print('Error in function call_pdfgrep returned from subprocess.Popen:')
print(pdfgrep_err)
exit()
else:
return pdfgrep_stdout
def query_ads_bibcode(self, query):
"""
Query ADS for this paper's bibcode
Uses the dictionary query keyed by argument name expected by ads.SearchQuery
"""
try:
paper_query = ads.SearchQuery(**query)
paper_list = []
for p in paper_query:
paper_list.append(p)
nresults = len(paper_list)
if nresults==0:
print('ERROR: Could not find paper on ADS with query {} for paper {}'.format(query, self.name))
elif nresults==1:
self.paper = paper_list[0]
self.bibcode = self.paper.bibcode
else:
print('ERROR: Found {} results on ADS with query {} for paper {}:'.format(nresults, query, self.name))
for p in paper_list:
print(p.bibcode)
print('-----')
except ads.exceptions.APIResponseError:
print('ERROR: ADS APIResponseError. You probably exceeded your rate limit.')
self.paper = None
raise
def bibtex_lines_to_string(self, lines):
"""
Turn Bibtex lines into a single unicode string.
"""
return u'\n'.join(lines) + u'\n\n'
def save_bibtex(self):
"""
Save Bibtex for this file
"""
if self.paper:
# Add file link to bibtex
file_type = 'PDF'
bibtex_ads = self.bibtex_lines_to_string(self.bibtex)
file_bibtex_string = ':{}:{}'.format(self.name, file_type)
file_bibtex_string = '{' + file_bibtex_string + '}'
file_bibtex_string = ',\n File = {}'.format(file_bibtex_string)
bibtex_last = bibtex_ads[-4:]
bibtex_body = bibtex_ads[:-4]
bibtex_body += unicode(file_bibtex_string)
bibtex = bibtex_body + bibtex_last
# Save bibtex
bibtex_file_name = self.paper.bibcode+'.bib'
fout = open(bibtex_file_name,'w')
fout.write(bibtex)
fout.close()
print('Wrote {} for {}'.format(bibtex_file_name, self.name))
else:
print('No paper information for {}, bibtex not written.'.format(self.name))
class DocumentCollection(object):
"""
Class for a set of documents (eg. pdf or ps articles)
"""
def __init__(self, files):
"""
Initializes DocumentCollection using a list of filenames
"""
self.documents = [Document(f) for f in files]
self.set_document_bibtex()
def set_document_bibtex(self):
"""
Uses query_ads_bibtex to set bibtex for documents in the collection
"""
bibcodes = [d.bibcode for d in self.documents]
bc_ads = self.query_ads_bibtex(bibcodes)
for d in self.documents:
d.bibtex = bc_ads.bibcode_entries[d.bibcode].lines
def query_ads_bibtex(self, bibcodes):
"""
Query ADS for the paper bibtexes specified by a list of bibcodes ('bibcodes')
"""
bc_ads = BibtexCollection()
try:
bibtex_string = ads.ExportQuery(bibcodes=bibcodes, format='bibtex').execute()
bc_ads.read_from_string(bibtex_string)
bibcodes_found = bc_ads.bibcode_entries.keys()
nresults = len(bibcodes_found)
nbibcodes = len(bibcodes)
if nresults==nbibcodes:
return bc_ads
else:
                print('WARNING: did not retrieve bibtex for {} bibcodes:'.format(nbibcodes - nresults))
for bc in bibcodes:
if not bc in bibcodes_found:
print(bc)
except ads.exceptions.APIResponseError:
print('ERROR: ADS APIResponseError. You probably exceeded your rate limit.')
raise
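# Illustrative sketch: build Documents from all PDFs in the current directory
# and write one <bibcode>.bib file per successfully identified paper. Assumes
# a valid ADS token has already been configured (see ADSToken below).
def _demo_export_pdf_bibtex(pattern='*.pdf'):
    collection = DocumentCollection(glob.glob(pattern))
    for doc in collection.documents:
        doc.save_bibtex()
    return collection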
class ADSToken(object):
"""
Class for managing the ADS token.
"""
def __init__(self, token=None):
self.token = token
if token:
self.set_ads_token()
else:
self.read_ads_token()
def exists(self):
if ads.config.token:
return True
else:
return False
def set_ads_token(self):
"""
Sets ADS token in file .adstoken
"""
pybib_dir = os.path.dirname(os.path.realpath(__file__))
fads_name = os.path.join(pybib_dir,'.adstoken')
try:
fads = open(fads_name,'w')
except:
print('ERROR: Could not open {} for writing!'.format(fads_name))
fads_name = os.path.join(os.getcwd(),fads_name)
try:
fads = open(fads_name,'w')
except:
print('ERROR: Could not open {} for writing!'.format(fads_name))
                exit()
fads.write('ads.config.token = {}\n'.format(self.token))
fads.close()
print('Wrote {}'.format(fads_name))
def read_ads_token(self):
"""
Reads ADS token from | |
(1 - alpha)
x = ts.to_numpy()[:i + 1]
ema = sum(weights * x) / sum(weights)
debias_fact = sum(weights) ** 2 / (sum(weights) ** 2 - sum(weights ** 2))
var = debias_fact * sum(weights * (x - ema) ** 2) / sum(weights)
std[i] = np.sqrt(var)
std[0] = np.NaN
return std
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
date(2019, 1, 7),
date(2019, 1, 8),
]
x = pd.Series([3.0, 2.0, 3.0, 1.0, 3.0, 6.0], index=dates)
result = exponential_std(x)
expected = exp_std_calc(x)
assert_series_equal(result, expected, obj="Exponentially weighted standard deviation")
result = exponential_std(x, 0.8)
expected = exp_std_calc(x, 0.8)
assert_series_equal(result, expected, obj="Exponentially weighted standard deviation weight 1")
def test_var():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
date(2019, 1, 7),
date(2019, 1, 8),
]
x = pd.Series([3.0, 2.0, 3.0, 1.0, 3.0, 6.0], index=dates)
result = var(x)
expected = pd.Series([np.nan, 0.500000, 0.333333, 0.916667, 0.800000, 2.800000], index=dates)
assert_series_equal(result, expected, obj="var")
result = var(x, Window(2, 0))
expected = pd.Series([np.nan, 0.5, 0.5, 2.0, 2.0, 4.5], index=dates)
assert_series_equal(result, expected, obj="var window 2")
result = var(x, Window('1w', 0))
expected = pd.Series([np.nan, 0.500000, 0.333333, 0.916666, 0.800000, 3.500000], index=dates)
assert_series_equal(result, expected, obj="var window 1w")
def test_cov():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
date(2019, 1, 7),
date(2019, 1, 8),
]
x = pd.Series([3.0, 2.0, 3.0, 1.0, 3.0, 6.0], index=dates)
y = pd.Series([3.5, 1.8, 2.9, 1.2, 3.1, 5.9], index=dates)
result = cov(x, y)
expected = pd.Series([np.nan, 0.850000, 0.466667, 0.950000, 0.825000, 2.700000], index=dates)
assert_series_equal(result, expected, obj="cov")
result = cov(x, y, Window(2, 0))
expected = pd.Series([np.nan, 0.850000, 0.549999, 1.7000000, 1.900000, 4.200000], index=dates)
assert_series_equal(result, expected, obj="cov window 2")
result = cov(x, y, Window('1w', 0))
expected = pd.Series([np.nan, 0.850000, 0.466667, 0.950000, 0.825000, 3.375000], index=dates)
assert_series_equal(result, expected, obj="cov window 1w")
def test_zscores():
with pytest.raises(MqValueError):
zscores(pd.Series(range(5)), "2d")
assert_series_equal(zscores(pd.Series(dtype=float)), pd.Series(dtype=float))
assert_series_equal(zscores(pd.Series(dtype=float), 1), pd.Series(dtype=float))
assert_series_equal(zscores(pd.Series([1])), pd.Series([0.0]))
assert_series_equal(zscores(pd.Series([1]), Window(1, 0)), pd.Series([0.0]))
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
date(2019, 1, 7),
date(2019, 1, 8),
]
x = pd.Series([3.0, 2.0, 3.0, 1.0, 3.0, 6.0], index=dates)
result = zscores(x)
expected = pd.Series([0.000000, -0.597614, 0.000000, -1.195229, 0.000000, 1.792843], index=dates)
assert_series_equal(result, expected, obj="z-score")
assert_series_equal(result, (x - x.mean()) / x.std(), obj="full series zscore")
result = zscores(x, Window(2, 0))
expected = pd.Series([0.0, -0.707107, 0.707107, -0.707107, 0.707107, 0.707107], index=dates)
assert_series_equal(result, expected, obj="z-score window 2")
assert_series_equal(zscores(x, Window(5, 5)), zscores(x, 5))
result = zscores(x, Window('1w', 0))
expected = pd.Series([0.0, -0.707106, 0.577350, -1.305582, 0.670820, 1.603567], index=dates)
assert_series_equal(result, expected, obj="z-score window 1w")
result = zscores(x, '1w')
expected = pd.Series([1.603567], index=dates[-1:])
assert_series_equal(result, expected, obj='z-score window string 1w')
result = zscores(x, '1m')
expected = pd.Series(dtype=float)
assert_series_equal(result, expected, obj="z-score window too large")
def test_winsorize():
assert_series_equal(winsorize(pd.Series(dtype=float)), pd.Series(dtype=float))
x = generate_series(10000)
    # Use absolute returns here: generate_series draws random absolute returns, so on a sample of 10k the
    # series has a decent chance of going negative; relative returns would then be garbage and the test could fail.
r = returns(x, type=Returns.ABSOLUTE)
for limit in [1.0, 2.0]:
mu = r.mean()
sigma = r.std()
b_upper = mu + sigma * limit * 1.001
b_lower = mu - sigma * limit * 1.001
assert (True in r.ge(b_upper).values)
assert (True in r.le(b_lower).values)
wr = winsorize(r, limit)
assert (True not in wr.ge(b_upper).values)
assert (True not in wr.le(b_lower).values)
def test_percentiles():
dates = [
date(2019, 1, 1),
date(2019, 1, 2),
date(2019, 1, 3),
date(2019, 1, 4),
date(2019, 1, 7),
date(2019, 1, 8),
]
x = pd.Series([3.0, 2.0, 3.0, 1.0, 3.0, 6.0], index=dates)
y = pd.Series([3.5, 1.8, 2.9, 1.2, 3.1, 6.0], index=dates)
assert_series_equal(percentiles(pd.Series(dtype=float), y), pd.Series(dtype=float))
assert_series_equal(percentiles(x, pd.Series(dtype=float)), pd.Series(dtype=float))
assert_series_equal(percentiles(x, y, Window(7, 0)), pd.Series(dtype=float))
result = percentiles(x, y, 2)
expected = pd.Series([50.0, 50.0, 100.0, 75.0], index=dates[2:])
assert_series_equal(result, expected, obj="percentiles with window length 2")
result = percentiles(x, y, Window(2, 0))
expected = pd.Series([100.0, 0.0, 50.0, 50.0, 100.0, 75.0], index=dates)
assert_series_equal(result, expected, obj="percentiles with window 2 and ramp 0")
result = percentiles(x, y, Window('1w', 0))
expected = pd.Series([100.0, 0.0, 33.333333, 25.0, 100.0, 90.0], index=dates)
assert_series_equal(result, expected, obj="percentiles with window 1w")
result = percentiles(x, y, Window('1w', '3d'))
expected = pd.Series([25.0, 100.0, 90.0], index=dates[3:])
assert_series_equal(result, expected, obj="percentiles with window 1w and ramp 3d")
result = percentiles(x)
expected = pd.Series([50.0, 25.0, 66.667, 12.500, 70.0, 91.667], index=dates)
assert_series_equal(result, expected, obj="percentiles over historical values")
result = percentiles(x, y)
expected = pd.Series([100.0, 0.0, 33.333, 25.0, 100.0, 91.667], index=dates)
assert_series_equal(result, expected, obj="percentiles without window length")
with pytest.raises(ValueError):
percentiles(x, pd.Series(dtype=float), Window(6, 1))
def test_percentile():
with pytest.raises(MqError):
percentile(pd.Series(dtype=float), -1)
with pytest.raises(MqError):
percentile(pd.Series(dtype=float), 100.1)
with pytest.raises(MqTypeError):
percentile(pd.Series(range(5), index=range(5)), 90, "2d")
for n in range(0, 101, 5):
assert percentile(pd.Series(x * 10 for x in range(0, 11)), n) == n
x = percentile(pd.Series(x for x in range(0, 5)), 50, 2)
assert_series_equal(x, pd.Series([1.5, 2.5, 3.5], index=pd.RangeIndex(2, 5)))
x = percentile(pd.Series(dtype=float), 90, "1d")
assert_series_equal(x, pd.Series(dtype=float), obj="Percentile with empty series")
def test_percentile_str():
today = datetime.datetime.now()
days = pd.date_range(today, periods=12, freq='D')
start = pd.Series([29, 56, 82, 13, 35, 53, 25, 23, 21, 12, 15, 9], index=days)
actual = percentile(start, 2, '10d')
expected = pd.Series([12.18, 9.54], index=pd.date_range(today + datetime.timedelta(days=10), periods=2, freq='D'))
assert_series_equal(actual, expected)
actual = percentile(start, 50, '1w')
expected = percentile(start, 50, 7)
assert_series_equal(actual, expected)
def test_regression():
x1 = pd.Series([0.0, 1.0, 4.0, 9.0, 16.0, 25.0, np.nan], index=pd.date_range('2019-1-1', periods=7), name='x1')
x2 = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], index=pd.date_range('2019-1-1', periods=8))
y = pd.Series([10.0, 14.0, 20.0, 28.0, 38.0, 50.0, 60.0], index=pd.date_range('2019-1-1', periods=7))
with pytest.raises(MqTypeError):
LinearRegression([x1, x2], y, 1)
regression = LinearRegression([x1, x2], y, True)
np.testing.assert_almost_equal(regression.coefficient(0), 10.0)
np.testing.assert_almost_equal(regression.coefficient(1), 1.0)
np.testing.assert_almost_equal(regression.coefficient(2), 3.0)
np.testing.assert_almost_equal(regression.r_squared(), 1.0)
expected = pd.Series([10.0, 14.0, 20.0, 28.0, 38.0, 50.0], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.fitted_values(), expected)
dates_predict = [date(2019, 2, 1), date(2019, 2, 2)]
predicted = regression.predict([pd.Series([2.0, 3.0], index=dates_predict),
pd.Series([6.0, 7.0], index=dates_predict)])
expected = pd.Series([30.0, 34.0], index=dates_predict)
assert_series_equal(predicted, expected)
np.testing.assert_almost_equal(regression.standard_deviation_of_errors(), 0)
def test_rolling_linear_regression():
x1 = pd.Series([0.0, 1.0, 4.0, 9.0, 16.0, 25.0, np.nan], index=pd.date_range('2019-1-1', periods=7), name='x1')
x2 = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0], index=pd.date_range('2019-1-1', periods=8))
y = pd.Series([10.0, 14.0, 20.0, 28.0, 28.0, 40.0, 60.0], index=pd.date_range('2019-1-1', periods=7))
with pytest.raises(MqValueError):
RollingLinearRegression([x1, x2], y, 3, True)
with pytest.raises(MqTypeError):
RollingLinearRegression([x1, x2], y, 4, 1)
regression = RollingLinearRegression([x1, x2], y, 4, True)
expected = pd.Series([np.nan, np.nan, np.nan, 10.0, 2.5, 19.0], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.coefficient(0), expected, check_names=False)
expected = pd.Series([np.nan, np.nan, np.nan, 1.0, -1.5, 1.0], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.coefficient(1), expected, check_names=False)
expected = pd.Series([np.nan, np.nan, np.nan, 3.0, 12.5, -1.0], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.coefficient(2), expected, check_names=False)
expected = pd.Series([np.nan, np.nan, np.nan, 1.0, 0.964029, 0.901961], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.r_squared(), expected, check_names=False)
expected = pd.Series([np.nan, np.nan, np.nan, 28.0, 28.5, 39.0], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.fitted_values(), expected, check_names=False)
expected = pd.Series([np.nan, np.nan, np.nan, 0.0, 2.236068, 4.472136], index=pd.date_range('2019-1-1', periods=6))
assert_series_equal(regression.standard_deviation_of_errors(), expected, check_names=False)
def test_sir_model():
n = 1000
d = 100
i0 = 100
r0 = 0
s0 = n
beta = 0.5
gamma = 0.25
t = np.linspace(0, d, d)
def deriv(y, t_loc, n_loc, beta_loc, gamma_loc):
s, i, r = y
dsdt = -beta_loc * s * i / n_loc
didt = beta_loc * s * i / n_loc - gamma_loc * i
drdt = gamma_loc * i
return dsdt, didt, drdt
def get_series(beta_loc, gamma_loc):
# Initial conditions vector
y0 = s0, i0, r0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(n, beta_loc, gamma_loc))
s, i, r = ret.T
dr = pd.date_range(dt.date.today(), dt.date.today() + dt.timedelta(days=d - 1))
return pd.Series(s, dr), pd.Series(i, dr), pd.Series(r, dr)
(s, i, r) = get_series(beta, gamma)
sir = SIRModel(beta, gamma, s, i, r, n)
assert abs(sir.beta() - beta) < 0.01
assert abs(sir.gamma() - gamma) < 0.01
beta = 0.4
gamma = 0.25
(s, i, r) = get_series(0.4, 0.25)
s_predict = sir.predict_s()
i_predict = sir.predict_i()
r_predict = sir.predict_r()
assert s_predict.size == d
assert i_predict.size == d
assert r_predict.size == d
with pytest.raises(MqTypeError):
SIRModel(beta, gamma, s, i, r, n, fit=0)
sir = SIRModel(beta, gamma, s, i, r, n, fit=False)
assert sir.beta() == beta
assert sir.gamma() == gamma
sir1 = SIRModel(beta, gamma, s, i, r, n, fit=False)
with DataContext(end=dt.date.today() + dt.timedelta(days=d - 1)):
sir2 = SIRModel(beta, gamma, s[0], i, r[0], | |
True)
self.update_treestore.set_value(iter, 1, sepolicy.boolean_desc(bools))
self.update_treestore.set_value(iter, 2, action[self.cur_dict["boolean"][bools]['active']])
self.update_treestore.set_value(iter, 3, True)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux name: %s")) % bools)
self.update_treestore.set_value(niter, 3, False)
for path, tclass in self.cur_dict["fcontext"]:
operation = self.cur_dict["fcontext"][(path, tclass)]["action"]
setype = self.cur_dict["fcontext"][(path, tclass)]["type"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file labeling for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file labeling for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file labeling for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path: %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File class: %s")) % sepolicy.file_type_str[tclass])
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for port, protocol in self.cur_dict["port"]:
operation = self.cur_dict["port"][(port, protocol)]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 3, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add ports for %s")) % self.application)
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete ports for %s")) % self.application)
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify ports for %s")) % self.application)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network ports: %s")) % port)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("Network protocol: %s")) % protocol)
self.update_treestore.set_value(niter, 3, False)
setype = self.cur_dict["port"][(port, protocol)]["type"]
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("SELinux file type: %s")) % setype)
for user in self.cur_dict["user"]:
operation = self.cur_dict["user"][user]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add user"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete user"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify user"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 1, (_("SELinux User : %s")) % user)
self.update_treestore.set_value(niter, 3, False)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
roles = self.cur_dict["user"][user]["role"]
self.update_treestore.set_value(niter, 1, (_("Roles: %s")) % roles)
mls = self.cur_dict["user"][user].get("range", "")
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for login in self.cur_dict["login"]:
operation = self.cur_dict["login"][login]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, _("Add login mapping"))
if operation == "-d":
self.update_treestore.set_value(iter, 1, _("Delete login mapping"))
if operation == "-m":
self.update_treestore.set_value(iter, 1, _("Modify login mapping"))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("Login Name : %s")) % login)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
seuser = self.cur_dict["login"][login]["seuser"]
self.update_treestore.set_value(niter, 1, (_("SELinux User: %s")) % seuser)
mls = self.cur_dict["login"][login].get("range", "")
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, _("MLS/MCS Range: %s") % mls)
for path in self.cur_dict["fcontext-equiv"]:
operation = self.cur_dict["fcontext-equiv"][path]["action"]
iter = self.update_treestore.append(None)
self.update_treestore.set_value(iter, 0, True)
self.update_treestore.set_value(iter, 2, operation)
self.update_treestore.set_value(iter, 0, True)
if operation == "-a":
self.update_treestore.set_value(iter, 1, (_("Add file equiv labeling.")))
if operation == "-d":
self.update_treestore.set_value(iter, 1, (_("Delete file equiv labeling.")))
if operation == "-m":
self.update_treestore.set_value(iter, 1, (_("Modify file equiv labeling.")))
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
self.update_treestore.set_value(niter, 1, (_("File path : %s")) % path)
niter = self.update_treestore.append(iter)
self.update_treestore.set_value(niter, 3, False)
src = self.cur_dict["fcontext-equiv"][path]["src"]
self.update_treestore.set_value(niter, 1, (_("Equivalence: %s")) % src)
self.show_popup(self.update_window)
def set_active_application_button(self):
if self.boolean_radio_button.get_active():
self.active_button = self.boolean_radio_button
if self.files_radio_button.get_active():
self.active_button = self.files_radio_button
if self.transitions_radio_button.get_active():
self.active_button = self.transitions_radio_button
if self.network_radio_button.get_active():
self.active_button = self.network_radio_button
def clearbuttons(self, clear=True):
self.main_selection_window.hide()
self.boolean_radio_button.set_visible(False)
self.files_radio_button.set_visible(False)
self.network_radio_button.set_visible(False)
self.transitions_radio_button.set_visible(False)
self.system_radio_button.set_visible(False)
self.lockdown_radio_button.set_visible(False)
self.user_radio_button.set_visible(False)
self.login_radio_button.set_visible(False)
if clear:
self.completion_entry.set_text("")
def show_system_page(self):
self.clearbuttons()
self.system_radio_button.set_visible(True)
self.lockdown_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("System"))
self.system_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def show_file_equiv_page(self, *args):
self.clearbuttons()
self.file_equiv_initialize()
self.file_equiv_radio_button.set_active(True)
self.applications_selection_button.set_label(_("File Equivalence"))
self.tab_change()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_users_page(self):
self.clearbuttons()
self.login_radio_button.set_visible(True)
self.user_radio_button.set_visible(True)
self.applications_selection_button.set_label(_("Users"))
self.login_radio_button.set_active(True)
self.tab_change()
self.user_initialize()
self.login_initialize()
self.idle_func()
self.add_button.set_sensitive(True)
self.delete_button.set_sensitive(True)
def show_applications_page(self):
self.clearbuttons(False)
self.boolean_radio_button.set_visible(True)
self.files_radio_button.set_visible(True)
self.network_radio_button.set_visible(True)
self.transitions_radio_button.set_visible(True)
self.boolean_radio_button.set_active(True)
self.tab_change()
self.idle_func()
def system_interface(self, *args):
self.show_system_page()
def users_interface(self, *args):
self.show_users_page()
def show_mislabeled_files(self, checkbutton, *args):
iterlist = []
ctr = 0
ipage = self.inner_notebook_files.get_current_page()
if checkbutton.get_active() == True:
for items in self.liststore:
iter = self.treesort.get_iter(ctr)
iter = self.treesort.convert_iter_to_child_iter(iter)
iter = self.treefilter.convert_iter_to_child_iter(iter)
if iter != None:
if self.liststore.get_value(iter, 4) == False:
iterlist.append(iter)
ctr += 1
for iters in iterlist:
self.liststore.remove(iters)
elif self.application != None:
self.liststore.clear()
if ipage == EXE_PAGE:
self.executable_files_initialize(self.application)
elif ipage == WRITABLE_PAGE:
self.writable_files_initialize(self.application)
elif ipage == APP_PAGE:
self.application_files_initialize(self.application)
def fix_mislabeled(self, path):
cur = selinux.getfilecon(path)[1].split(":")[2]
con = selinux.matchpathcon(path, 0)[1].split(":")[2]
if self.verify(_("Run restorecon on %(PATH)s to change its type from %(CUR_CONTEXT)s to the default %(DEF_CONTEXT)s?") % {"PATH": path, "CUR_CONTEXT": cur, "DEF_CONTEXT": con}, title="restorecon dialog") == Gtk.ResponseType.YES:
self.dbus.restorecon(path)
self.application_selected()
def new_updates(self, *args):
self.update_button.set_sensitive(self.modified())
self.revert_button.set_sensitive(self.modified())
def update_or_revert_changes(self, button, *args):
self.update_gui()
self.update = (button.get_label() == _("Update"))
if self.update:
self.update_window.set_title(_("Update Changes"))
else:
self.update_window.set_title(_("Revert Changes"))
def apply_changes_button_press(self, *args):
self.close_popup()
if self.update:
self.update_the_system()
else:
self.revert_data()
self.finish_init = False
self.previously_modified_initialize(self.dbus.customized())
self.finish_init = True
self.clear_filters()
self.application_selected()
self.new_updates()
self.update_treestore.clear()
def update_the_system(self, *args):
self.close_popup()
update_buffer = self.format_update()
self.wait_mouse()
try:
self.dbus.semanage(update_buffer)
except dbus.exceptions.DBusException as e:
print(e)
self.ready_mouse()
self.init_cur()
def ipage_value_lookup(self, lookup):
ipage_values = {"Executable Files": 0, "Writable Files": 1, "Application File Type": 2, "Inbound": 1, "Outbound": 0}
for value in ipage_values:
if value == lookup:
return ipage_values[value]
return "Booleans"
def get_attributes_update(self, attribute):
attribute = attribute.split(": ")[1]
bool_id = attribute.split(": ")[0]
if bool_id == "SELinux name":
self.bool_revert = attribute
else:
return attribute
def format_update(self):
self.revert_data()
update_buffer = ""
for k in self.cur_dict:
if k in "boolean":
for b in self.cur_dict[k]:
update_buffer += "boolean -m -%d %s\n" % (self.cur_dict[k][b]["active"], b)
if k in "login":
for l in self.cur_dict[k]:
if self.cur_dict[k][l]["action"] == "-d":
update_buffer += "login -d %s\n" % l
elif "range" in self.cur_dict[k][l]:
update_buffer += "login %s -s %s -r %s %s\n" % (self.cur_dict[k][l]["action"], self.cur_dict[k][l]["seuser"], self.cur_dict[k][l]["range"], l)
else:
update_buffer += "login %s -s %s %s\n" % (self.cur_dict[k][l]["action"], self.cur_dict[k][l]["seuser"], l)
if k in "user":
for u in self.cur_dict[k]:
if self.cur_dict[k][u]["action"] == "-d":
update_buffer += "user -d %s\n" % u
elif "level" in self.cur_dict[k][u] and "range" in self.cur_dict[k][u]:
update_buffer += "user %s -L %s -r %s -R %s %s\n" % (self.cur_dict[k][u]["action"], self.cur_dict[k][u]["level"], self.cur_dict[k][u]["range"], self.cur_dict[k][u]["role"], u)
else:
update_buffer += "user %s -R %s %s\n" % (self.cur_dict[k][u]["action"], self.cur_dict[k][u]["role"], u)
if k in "fcontext-equiv":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -e %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["src"], f)
if k in "fcontext":
for f in self.cur_dict[k]:
if self.cur_dict[k][f]["action"] == "-d":
update_buffer += "fcontext -d %s\n" % f
else:
update_buffer += "fcontext %s -t %s -f %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], self.cur_dict[k][f]["class"], f)
if k in "port":
for port, protocol in self.cur_dict[k]:
if self.cur_dict[k][(port, protocol)]["action"] == "-d":
update_buffer += "port -d -p %s %s\n" % (protocol, port)
else:
update_buffer += "port %s -t %s -p %s %s\n" % (self.cur_dict[k][f]["action"], self.cur_dict[k][f]["type"], protocol, port)
return update_buffer
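    # Example of the kind of buffer format_update() produces (one semanage-style
    # command per line; the concrete names below are purely illustrative):
    #
    #   boolean -m -1 some_boolean
    #   port -a -t some_port_t -p tcp 8080
    #   fcontext -a -t some_file_t -f <file_class> /some/path(/.*)?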
def revert_data(self):
ctr = 0
remove_list = []
update_buffer = ""
for items in self.update_treestore:
if not self.update_treestore[ctr][0]:
remove_list.append(ctr)
ctr += 1
remove_list.reverse()
for ctr in remove_list:
self.remove_cur(ctr)
def reveal_advanced_system(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.system_policy_label.set_visible(advanced)
self.system_policy_type_combobox.set_visible(advanced)
def reveal_advanced(self, label, *args):
advanced = label.get_text() == ADVANCED_LABEL[0]
if advanced:
label.set_text(ADVANCED_LABEL[1])
else:
label.set_text(ADVANCED_LABEL[0])
self.files_mls_label.set_visible(advanced)
self.files_mls_entry.set_visible(advanced)
self.network_mls_label.set_visible(advanced)
self.network_mls_entry.set_visible(advanced)
def on_show_advanced_search_window(self, label, *args):
if label.get_text() == ADVANCED_SEARCH_LABEL[1]:
label.set_text(ADVANCED_SEARCH_LABEL[0])
self.close_popup()
else:
label.set_text(ADVANCED_SEARCH_LABEL[1])
self.show_popup(self.advanced_search_window)
def set_enforce_text(self, value):
if value:
self.status_bar.push(self.context_id, _("System Status: Enforcing"))
self.current_status_enforcing.set_active(True)
else:
self.status_bar.push(self.context_id, _("System Status: Permissive"))
self.current_status_permissive.set_active(True)
def set_enforce(self, button):
if not self.finish_init:
return
self.dbus.setenforce(button.get_active())
self.set_enforce_text(button.get_active())
def on_browse_select(self, *args):
filename = self.file_dialog.get_filename()
if filename == None:
return
self.clear_entry = False
self.file_dialog.hide()
self.files_path_entry.set_text(filename)
if self.import_export == 'Import':
self.import_config(filename)
elif self.import_export == 'Export':
self.export_config(filename)
def recursive_path(self, *args):
path = self.files_path_entry.get_text()
if self.recursive_path_toggle.get_active():
if not path.endswith("(/.*)?"):
self.files_path_entry.set_text(path + "(/.*)?")
elif path.endswith("(/.*)?"):
path = path.split("(/.*)?")[0]
self.files_path_entry.set_text(path)
def highlight_entry_text(self, entry_obj, *args):
txt = entry_obj.get_text()
if self.clear_entry:
entry_obj.set_text('')
self.clear_entry = False
def autofill_add_files_entry(self, entry):
text = entry.get_text()
if text == '':
return
if text.endswith("(/.*)?"):
self.recursive_path_toggle.set_active(True)
for d in sepolicy.DEFAULT_DIRS:
if text.startswith(d):
for t in self.files_type_combolist:
if t[0].endswith(sepolicy.DEFAULT_DIRS[d]):
self.combo_set_active_text(self.files_type_combobox, t[0])
def resize_columns(self, *args):
self.boolean_column_1 = self.boolean_treeview.get_col(1)
width = self.boolean_column_1.get_width()
renderer = self.boolean_column_1.get_cell_renderers()
def browse_for_files(self, *args):
self.file_dialog.show()
def close_config_window(self, *args):
self.file_dialog.hide()
def change_default_policy(self, *args):
if self.typeHistory == self.system_policy_type_combobox.get_active():
return
if self.verify(_("Changing the policy type will cause a relabel of the entire file system on the next boot. Relabeling takes a long time depending on the size of the file system. | |
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Concatenate, Reshape, Softmax
from keras import backend as K
import keras.losses
import os
import pickle
import numpy as np
from sklearn import preprocessing
import pandas as pd
import matplotlib.pyplot as plt
import string
import keras.backend as K
from keras.legacy import interfaces
from keras.optimizers import Optimizer
from scrambler.models import *
def one_hot_encode_msa(msa, ns=21) :
one_hot = np.zeros((msa.shape[0], msa.shape[1], ns))
for i in range(msa.shape[0]) :
for j in range(msa.shape[1]) :
one_hot[i, j, int(msa[i, j])] = 1.
return one_hot
def parse_a3m(filename):
seqs = []
table = str.maketrans(dict.fromkeys(string.ascii_lowercase))
# read file line by line
for line in open(filename,"r"):
# skip labels
if line[0] != '>':
# remove lowercase letters and right whitespaces
seqs.append(line.rstrip().translate(table))
# convert letters into numbers
alphabet = np.array(list("ARNDCQEGHILKMFPSTWYV-"), dtype='|S1').view(np.uint8)
msa = np.array([list(s) for s in seqs], dtype='|S1').view(np.uint8)
for i in range(alphabet.shape[0]):
msa[msa == alphabet[i]] = i
# treat all unknown characters as gaps
msa[msa > 20] = 20
return msa
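# Illustrative sketch (the file name is hypothetical): parse an A3M alignment
# and one-hot encode it for the feature-building functions further below.
def _demo_load_msa(path='example.a3m'):
    msa = parse_a3m(path)                  # (N, L) integer-encoded MSA, gaps = 20
    msa_one_hot = one_hot_encode_msa(msa)  # (N, L, 21) one-hot encoding
    return msa, msa_one_hot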
def make_a3m(seqs) :
alphabet = np.array(list("ARNDCQEGHILKMFPSTWYV-"), dtype='|S1').view(np.uint8)
msa = np.array([list(s) for s in seqs], dtype='|S1').view(np.uint8)
for i in range(alphabet.shape[0]):
msa[msa == alphabet[i]] = i
msa[msa > 20] = 20
return msa
#Code from https://gist.github.com/mayukh18/c576a37a74a9a5160ff32a535c2907b9
class AdamAccumulate(Optimizer):
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., amsgrad=False, accum_iters=1, **kwargs):
if accum_iters < 1:
raise ValueError('accum_iters must be >= 1')
super(AdamAccumulate, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
self.accum_iters = K.variable(accum_iters, K.dtype(self.iterations))
self.accum_iters_float = K.cast(self.accum_iters, K.floatx())
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
completed_updates = K.cast(K.tf.floordiv(self.iterations, self.accum_iters), K.floatx())
if self.initial_decay > 0:
lr = lr * (1. / (1. + self.decay * completed_updates))
t = completed_updates + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)))
# self.iterations incremented after processing a batch
# batch: 1 2 3 4 5 6 7 8 9
# self.iterations: 0 1 2 3 4 5 6 7 8
# update_switch = 1: x x (if accum_iters=4)
update_switch = K.equal((self.iterations + 1) % self.accum_iters, 0)
update_switch = K.cast(update_switch, K.floatx())
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
gs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat, tg in zip(params, grads, ms, vs, vhats, gs):
sum_grad = tg + g
avg_grad = sum_grad / self.accum_iters_float
m_t = (self.beta_1 * m) + (1. - self.beta_1) * avg_grad
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(avg_grad)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(K.update(vhat, (1 - update_switch) * vhat + update_switch * vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, (1 - update_switch) * m + update_switch * m_t))
self.updates.append(K.update(v, (1 - update_switch) * v + update_switch * v_t))
self.updates.append(K.update(tg, (1 - update_switch) * sum_grad))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, (1 - update_switch) * p + update_switch * new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad}
base_config = super(AdamAccumulate, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
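# Usage sketch (assumes an already-built Keras `model`): compile with gradient
# accumulation so that `accum_iters` mini-batches are averaged before each
# weight update, emulating a larger effective batch size.
def _compile_with_accumulated_adam(model, accum_iters=4, lr=0.001):
    opt = AdamAccumulate(lr=lr, beta_1=0.9, beta_2=0.999, accum_iters=accum_iters)
    model.compile(optimizer=opt, loss='categorical_crossentropy')
    return model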
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
class LegacyInstanceNormalization(Layer):
def __init__(self,
axis=None,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer='zeros',
gamma_initializer='ones',
beta_regularizer=None,
gamma_regularizer=None,
beta_constraint=None,
gamma_constraint=None,
**kwargs):
super(LegacyInstanceNormalization, self).__init__(**kwargs)
self.supports_masking = True
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = initializers.get(beta_initializer)
self.gamma_initializer = initializers.get(gamma_initializer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_constraint = constraints.get(beta_constraint)
self.gamma_constraint = constraints.get(gamma_constraint)
def build(self, input_shape):
ndim = len(input_shape)
if self.axis == 0:
raise ValueError('Axis cannot be zero')
if (self.axis is not None) and (ndim == 2):
raise ValueError('Cannot specify axis for rank 1 tensor')
self.input_spec = InputSpec(ndim=ndim)
if self.axis is None:
shape = (1,)
else:
shape = (input_shape[self.axis],)
if self.scale:
self.gamma = self.add_weight(shape=shape,
name='gamma',
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint)
else:
self.gamma = None
if self.center:
self.beta = self.add_weight(shape=shape,
name='beta',
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint)
else:
self.beta = None
self.built = True
def call(self, inputs, training=None):
input_shape = K.int_shape(inputs)
reduction_axes = list(range(0, len(input_shape)))
if self.axis is not None:
del reduction_axes[self.axis]
del reduction_axes[0]
mean = K.mean(inputs, reduction_axes, keepdims=True)
stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
normed = (inputs - mean) / stddev
broadcast_shape = [1] * len(input_shape)
if self.axis is not None:
broadcast_shape[self.axis] = input_shape[self.axis]
if self.scale:
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
normed = normed * broadcast_gamma
if self.center:
broadcast_beta = K.reshape(self.beta, broadcast_shape)
normed = normed + broadcast_beta
return normed
def get_config(self):
config = {
'axis': self.axis,
'epsilon': self.epsilon,
'center': self.center,
'scale': self.scale,
'beta_initializer': initializers.serialize(self.beta_initializer),
'gamma_initializer': initializers.serialize(self.gamma_initializer),
'beta_regularizer': regularizers.serialize(self.beta_regularizer),
'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
'beta_constraint': constraints.serialize(self.beta_constraint),
'gamma_constraint': constraints.serialize(self.gamma_constraint)
}
        base_config = super(LegacyInstanceNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
#1-hot MSA to PSSM
def msa2pssm(msa1hot, w):
beff = tf.reduce_sum(w)
f_i = tf.reduce_sum(w[:,None,None]*msa1hot, axis=0) / beff + 0.005#1e-9
h_i = tf.reduce_sum( -f_i * tf.math.log(f_i), axis=1)
return tf.concat([f_i, h_i[:,None]], axis=1)
#Reweight MSA based on cutoff
def reweight(msa1hot, cutoff):
with tf.name_scope('reweight'):
id_min = tf.cast(tf.shape(msa1hot)[1], tf.float32) * cutoff
id_mtx = tf.tensordot(msa1hot, msa1hot, [[1,2], [1,2]])
id_mask = id_mtx > id_min
w = 1.0/tf.reduce_sum(tf.cast(id_mask, dtype=tf.float32),-1)
return w
#Shrunk covariance inversion
def fast_dca(msa1hot, weights, penalty = 4.5):
nr = tf.shape(msa1hot)[0]
nc = tf.shape(msa1hot)[1]
ns = tf.shape(msa1hot)[2]
with tf.name_scope('covariance'):
x = tf.reshape(msa1hot, (nr, nc * ns))
num_points = tf.reduce_sum(weights) - tf.sqrt(tf.reduce_mean(weights))
mean = tf.reduce_sum(x * weights[:,None], axis=0, keepdims=True) / num_points
x = (x - mean) * tf.sqrt(weights[:,None])
cov = tf.matmul(tf.transpose(x), x)/num_points
with tf.name_scope('inv_convariance'):
cov_reg = cov + tf.eye(nc * ns) * penalty / tf.sqrt(tf.reduce_sum(weights))
inv_cov = tf.linalg.inv(cov_reg)
x1 = tf.reshape(inv_cov,(nc, ns, nc, ns))
x2 = tf.transpose(x1, [0,2,1,3])
features = tf.reshape(x2, (nc, nc, ns * ns))
x3 = tf.sqrt(tf.reduce_sum(tf.square(x1[:,:-1,:,:-1]),(1,3))) * (1-tf.eye(nc))
apc = tf.reduce_sum(x3,0,keepdims=True) * tf.reduce_sum(x3,1,keepdims=True) / tf.reduce_sum(x3)
contacts = (x3 - apc) * (1-tf.eye(nc))
return tf.concat([features, contacts[:,:,None]], axis=2)
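# Note: with the 21-letter alphabet used here (ns = 21), fast_dca() returns a
# tensor of shape (L, L, 21*21 + 1) = (L, L, 442), which is why
# keras_collect_features() below falls back to tf.zeros([ncol, ncol, 442])
# when the MSA contains only a single sequence.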
#Collect input features (keras code)
def keras_collect_features(inputs, wmin=0.8) :
f1d_seq_batched, msa1hot_batched = inputs
f1d_seq = f1d_seq_batched[0, ...]
msa1hot = msa1hot_batched[0, ...]
nrow = K.shape(msa1hot)[0]
ncol = K.shape(msa1hot)[1]
w = reweight(msa1hot, wmin)
# 1D features
f1d_pssm = msa2pssm(msa1hot, w)
f1d = tf.concat(values=[f1d_seq, f1d_pssm], axis=1)
f1d = tf.expand_dims(f1d, axis=0)
f1d = tf.reshape(f1d, [1,ncol,42])
# 2D features
f2d_dca = tf.cond(nrow>1, lambda: fast_dca(msa1hot, w), lambda: tf.zeros([ncol,ncol,442], tf.float32))
f2d_dca = tf.expand_dims(f2d_dca, axis=0)
f2d = tf.concat([tf.tile(f1d[:,:,None,:], [1,1,ncol,1]),
tf.tile(f1d[:,None,:,:], [1,ncol,1,1]),
f2d_dca], axis=-1)
f2d = tf.reshape(f2d, [1,ncol,ncol,442+2*42])
return f2d
#Collect input features (tf code)
def pssm_func(inputs, diag=0.0):
x,y = inputs
_,_,L,A = [tf.shape(y)[k] for k in range(4)]
with tf.name_scope('1d_features'):
# sequence
x_i = x[0,:,:20]
# pssm
f_i = y[0,0, :, :]
# entropy
h_i = tf.zeros((L,1))
#h_i = K.sum(-f_i * K.log(f_i + 1e-8), axis=-1, keepdims=True)
# tile and combined 1D features
feat_1D = tf.concat([x_i,f_i,h_i], axis=-1)
feat_1D_tile_A = tf.tile(feat_1D[:,None,:], [1,L,1])
feat_1D_tile_B = tf.tile(feat_1D[None,:,:], [L,1,1])
with tf.name_scope('2d_features'):
ic = diag * tf.eye(L*A)
ic = tf.reshape(ic,(L,A,L,A))
ic = tf.transpose(ic,(0,2,1,3))
ic = tf.reshape(ic,(L,L,A*A))
i0 = tf.zeros([L,L,1])
feat_2D = tf.concat([ic,i0], axis=-1)
feat = tf.concat([feat_1D_tile_A, feat_1D_tile_B, feat_2D],axis=-1)
return tf.reshape(feat, [1,L,L,442+2*42])
def load_trrosetta_model(model_path) :
saved_model = load_model(model_path, custom_objects = {
'InstanceNormalization' : LegacyInstanceNormalization,
'reweight' : reweight,
'wmin' : 0.8,
'msa2pssm' : msa2pssm,
'tf' : tf,
'fast_dca' : fast_dca,
'keras_collect_features' : pssm_func
})
return saved_model
def _get_kl_divergence_keras(p_dist, p_theta, p_phi, p_omega, t_dist, t_theta, t_phi, t_omega) :
kl_dist = K.mean(K.sum(t_dist * K.log(t_dist / p_dist), axis=-1), axis=(-1, -2))
kl_theta = K.mean(K.sum(t_theta * K.log(t_theta / p_theta), axis=-1), axis=(-1, -2))
kl_phi = K.mean(K.sum(t_phi * K.log(t_phi / p_phi), axis=-1), axis=(-1, -2))
kl_omega = K.mean(K.sum(t_omega * K.log(t_omega / p_omega), axis=-1), axis=(-1, -2))
return K.mean(kl_dist + kl_theta + kl_phi + kl_omega, axis=1)
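# Illustrative sketch (not part of the original code): the per-bin term used in
# each of the KL losses above is sum_k t_k * log(t_k / p_k) over the categorical
# output bins; a minimal NumPy version for two small distributions:
def _kl_categorical(t, p):
    import numpy as np
    t, p = np.asarray(t, dtype=float), np.asarray(p, dtype=float)
    return float((t * np.log(t / p)).sum())
# e.g. _kl_categorical([0.7, 0.2, 0.1], [0.5, 0.3, 0.2]) ~= 0.085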
def optimize_trrosetta_scores(predictor, x, batch_size, n_iters, input_background, drop=None, scrambler_mode='inclusion', norm_mode='instance', adam_accum_iters=2, adam_lr=0.01, adam_beta_1=0.5, adam_beta_2=0.9, n_samples=4, sample_mode='gumbel', entropy_mode='target', entropy_bits=0., entropy_weight=1.) :
if not isinstance(x, list) :
x = [x]
group = [np.zeros((x[0].shape[0], 1))]
stc.StyleSetSpec(wx.stc.STC_P_STRING, "fore:%(strfg)s,face:%(mono)s" % faces)
stc.StyleSetSpec(wx.stc.STC_P_CHARACTER, "fore:%(strfg)s,face:%(mono)s" % faces)
stc.StyleSetSpec(wx.stc.STC_P_WORD, "fore:%(keywordfg)s,bold" % faces)
stc.StyleSetSpec(wx.stc.STC_P_TRIPLE, "fore:%(q3sfg)s" % faces)
stc.StyleSetSpec(wx.stc.STC_P_TRIPLEDOUBLE, "fore:%(q3fg)s,back:%(q3bg)s" % faces)
stc.StyleSetSpec(wx.stc.STC_P_CLASSNAME, "fore:%(deffg)s,bold" % faces)
stc.StyleSetSpec(wx.stc.STC_P_DEFNAME, "fore:%(deffg)s,bold" % faces)
stc.StyleSetSpec(wx.stc.STC_P_OPERATOR, "")
stc.StyleSetSpec(wx.stc.STC_P_IDENTIFIER, "")
stc.StyleSetSpec(wx.stc.STC_P_COMMENTBLOCK, "fore:#7F7F7F")
stc.StyleSetSpec(wx.stc.STC_P_STRINGEOL, "fore:#000000,face:%(mono)s,"
"back:%(eolbg)s,eolfilled" % faces)
stc.CallTipSetBackground(faces['calltipbg'])
stc.CallTipSetForeground(faces['calltipfg'])
class FormDialog(wx.Dialog):
"""
Dialog for displaying a complex editable form.
Uses ComboBox for fields with choices.
Uses two ListBoxes for list fields.
@param props [{
name: field name
?type: (bool | list | anything) if field has direct content,
or callback(dialog, field, panel, data) making controls
?label: field label if not using name
?help: field tooltip
?path: [data path, if, more, complex, nesting]
?choices: [value, ] or callback(field, path, data) returning list
      ?choicesedit: true if value not limited to given choices
      ?component: specific wx component to use
?toggle: if true, field is toggle-able and children hidden when off
?children: [{field}, ]
?link: "name" of linked field, cleared and repopulated on change,
or callable(data) doing required change and returning field name
?tb: [{type, ?help}] for SQLiteTextCtrl component, adds toolbar,
supported toolbar buttons "open" and "paste"
}]
@param autocomp list of words to add to SQLiteTextCtrl autocomplete,
or a dict for words and subwords
@param onclose callable(data) on closing dialog, returning whether to close
"""
def __init__(self, parent, title, props=None, data=None, edit=None, autocomp=None, onclose=None):
wx.Dialog.__init__(self, parent, title=title,
style=wx.CAPTION | wx.CLOSE_BOX | wx.RESIZE_BORDER)
self._ignore_change = False
self._editmode = bool(edit) if edit is not None else True
self._comps = collections.defaultdict(list) # {(path): [wx component, ]}
self._autocomp = autocomp
self._onclose = onclose
self._toggles = {} # {(path): wx.CheckBox, }
self._props = []
self._data = {}
self._rows = 0
panel_wrap = wx.ScrolledWindow(self)
panel_items = self._panel = wx.Panel(panel_wrap)
panel_wrap.SetScrollRate(0, 20)
self.Sizer = wx.BoxSizer(wx.VERTICAL)
sizer_buttons = self.CreateButtonSizer(wx.OK | (wx.CANCEL if self._editmode else 0))
panel_wrap.Sizer = wx.BoxSizer(wx.VERTICAL)
panel_items.Sizer = wx.GridBagSizer(hgap=5, vgap=0)
panel_items.Sizer.SetEmptyCellSize((0, 0))
panel_wrap.Sizer.Add(panel_items, border=10, proportion=1, flag=wx.RIGHT | wx.GROW)
self.Sizer.Add(panel_wrap, border=15, proportion=1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.Sizer.Add(sizer_buttons, border=5, flag=wx.ALL | wx.ALIGN_CENTER_HORIZONTAL)
self.Bind(wx.EVT_BUTTON, self._OnClose, id=wx.ID_OK)
for x in self, panel_wrap, panel_items:
ColourManager.Manage(x, "ForegroundColour", wx.SYS_COLOUR_BTNTEXT)
ColourManager.Manage(x, "BackgroundColour", wx.SYS_COLOUR_BTNFACE)
self.Populate(props, data, edit)
if self._editmode:
self.MinSize = (440, panel_items.Size[1] + 80)
else:
self.MinSize = (440, panel_items.Size[1] + 10)
self.Fit()
self.CenterOnParent()
def Populate(self, props, data, edit=None):
"""
Clears current content, if any, adds controls to dialog,
and populates with data.
"""
self._ignore_change = True
self._props = copy.deepcopy(props or [])
self._data = copy.deepcopy(data or {})
if edit is not None: self._editmode = edit
self._rows = 0
while self._panel.Sizer.Children: self._panel.Sizer.Remove(0)
for c in self._panel.Children: c.Destroy()
self._toggles.clear()
self._comps.clear()
for f in self._props: self._AddField(f)
for f in self._props: self._PopulateField(f)
self._panel.Sizer.AddGrowableCol(6, 1)
if len(self._comps) == 1: self._panel.Sizer.AddGrowableRow(0, 1)
self._ignore_change = False
self.Layout()
def GetData(self):
"""Returns the current data values."""
result = copy.deepcopy(self._data)
for p in sorted(self._toggles, key=len, reverse=True):
if not self._toggles[p].Value:
ptr = result
for x in p[:-1]: ptr = ptr.get(x) or {}
ptr.pop(p[-1], None)
return result
def _GetValue(self, field, path=()):
"""Returns field data value."""
ptr = self._data
path = field.get("path") or path
for x in path: ptr = ptr.get(x, {}) if isinstance(ptr, dict) else ptr[x]
return ptr.get(field["name"])
def _SetValue(self, field, value, path=()):
"""Sets field data value."""
ptr = parent = self._data
path = field.get("path") or path
for x in path:
ptr = ptr.get(x) if isinstance(ptr, dict) else ptr[x]
if ptr is None: ptr = parent[x] = {}
parent = ptr
ptr[field["name"]] = value
def _DelValue(self, field, path=()):
"""Deletes field data value."""
ptr = self._data
path = field.get("path") or path
for x in path: ptr = ptr.get(x, {})
ptr.pop(field["name"], None)
def _GetField(self, name, path=()):
"""Returns field from props."""
fields, path = self._props, list(path) + [name]
while fields:
for f in fields:
if [f["name"]] == path: return f
if f["name"] == path[0] and f.get("children"):
fields, path = f["children"], path[1:]
break # for f
def _GetChoices(self, field, path):
"""Returns the choices list for field, if any."""
result = field.get("choices") or []
if callable(result):
if path:
parentfield = self._GetField(path[-1], path[:-1])
data = self._GetValue(parentfield, path[:-1])
else: data = self.GetData()
result = result(data)
return result
def _Unprint(self, s, escape=True):
"""Returns string with unprintable characters escaped or stripped."""
enc = "unicode_escape" if isinstance(s, unicode) else "string_escape"
repl = (lambda m: m.group(0).encode(enc)) if escape else ""
return re.sub(r"[\x00-\x1f]", repl, s)
def _AddField(self, field, path=()):
"""Adds field controls to dialog."""
callback = field["type"] if callable(field.get("type")) \
and field["type"] not in (bool, list) else None
if not callback and not self._editmode and self._GetValue(field, path) is None: return
MAXCOL = 8
parent, sizer = self._panel, self._panel.Sizer
level, fpath = len(path), path + (field["name"], )
col = 0
if field.get("toggle"):
toggle = wx.CheckBox(parent)
if field.get("help"): toggle.ToolTip = field["help"]
if self._editmode:
                toggle.Label = field["label"] if "label" in field else field["name"]
sizer.Add(toggle, border=5, pos=(self._rows, level), span=(1, 2), flag=wx.TOP | wx.BOTTOM)
else: # Show ordinary label in view mode, checkbox goes very gray
label = wx.StaticText(parent, label=field["label"] if "label" in field else field["name"])
if field.get("help"): label.ToolTip = field["help"]
mysizer = wx.BoxSizer(wx.HORIZONTAL)
mysizer.Add(toggle, border=5, flag=wx.RIGHT)
mysizer.Add(label)
sizer.Add(mysizer, border=5, pos=(self._rows, level), span=(1, 2), flag=wx.TOP | wx.BOTTOM)
self._comps[fpath].append(toggle)
self._toggles[tuple(field.get("path") or fpath)] = toggle
self._BindHandler(self._OnToggleField, toggle, field, path, toggle)
col += 2
if callback: callback(self, field, parent, self._data)
elif not field.get("toggle") or any(field.get(x) for x in ["type", "choices", "component"]):
ctrls = self._MakeControls(field, path)
for c in ctrls:
colspan = 2 if isinstance(c, wx.StaticText) else MAXCOL - level - col
brd, BRD = (5, wx.BOTTOM) if isinstance(c, wx.CheckBox) else (0, 0)
sizer.Add(c, border=brd, pos=(self._rows, level + col), span=(1, colspan), flag=BRD | wx.GROW)
col += colspan
self._rows += 1
for f in field.get("children") or (): self._AddField(f, fpath)
def _PopulateField(self, field, path=()):
"""Populates field controls with data state."""
if not self._editmode and self._GetValue(field, path) is None: return
fpath = path + (field["name"], )
choices = self._GetChoices(field, path)
value = self._GetValue(field, path)
ctrls = [x for x in self._comps[fpath]
if not isinstance(x, (wx.StaticText, wx.Sizer))]
if list is field.get("type"):
value = value or []
listbox1, listbox2 = (x for x in ctrls if isinstance(x, wx.ListBox))
listbox1.SetItems([self._Unprint(x) for x in choices if x not in value])
listbox2.SetItems(map(self._Unprint, value or []))
for j, x in enumerate(x for x in choices if x not in value):
listbox1.SetClientData(j, x)
for j, x in enumerate(value or []): listbox2.SetClientData(j, x)
listbox1.Enable(self._editmode)
listbox2.Enable(self._editmode)
for c in ctrls:
if isinstance(c, wx.Button): c.Enable(self._editmode)
else:
for i, c in enumerate(ctrls):
if not i and isinstance(c, wx.CheckBox) and field.get("toggle"):
c.Value = (value is not None)
self._OnToggleField(field, path, c)
c.Enable(self._editmode)
continue # for i, c
if isinstance(c, wx.stc.StyledTextCtrl):
c.SetText(value or "")
if self._autocomp and isinstance(c, SQLiteTextCtrl):
c.AutoCompClearAdded()
c.AutoCompAddWords(self._autocomp)
if isinstance(self._autocomp, dict):
for w, ww in self._autocomp.items():
c.AutoCompAddSubWords(w, ww)
elif isinstance(c, wx.CheckBox): c.Value = bool(value)
else:
if isinstance(value, (list, tuple)): value = "".join(value)
if isinstance(c, wx.ComboBox):
c.SetItems(map(self._Unprint, choices))
for j, x in enumerate(choices): c.SetClientData(j, x)
value = self._Unprint(value) if value else value
c.Value = "" if value is None else value
if isinstance(c, wx.TextCtrl): c.SetEditable(self._editmode)
else: c.Enable(self._editmode)
for f in field.get("children") or (): self._PopulateField(f, fpath)
def _MakeControls(self, field, path=()):
"""Returns a list of wx components for field."""
result = []
parent, ctrl = self._panel, None
fpath = path + (field["name"], )
label = field["label"] if "label" in field else field["name"]
accname = "ctrl_%s" % self._rows # Associating label click with control
if list is field.get("type"):
if not field.get("toggle") and field.get("type") not in (bool, list):
result.append(wx.StaticText(parent, label=label, name=accname + "_label"))
sizer_f = wx.BoxSizer(wx.VERTICAL)
sizer_l = wx.BoxSizer(wx.HORIZONTAL)
sizer_b1 = wx.BoxSizer(wx.VERTICAL)
sizer_b2 = wx.BoxSizer(wx.VERTICAL)
ctrl1 = wx.ListBox(parent, style=wx.LB_EXTENDED)
b1 = wx.Button(parent, label=">", size=(30, -1))
b2 = wx.Button(parent, label="<", size=(30, -1))
ctrl2 = wx.ListBox(parent, style=wx.LB_EXTENDED)
b3 = wx.Button(parent, label=u"\u2191", size=(20, -1))
b4 = wx.Button(parent, label=u"\u2193", size=(20, -1))
b1.ToolTip = "Add selected from left to right"
b2.ToolTip = "Remove selected from right"
b3.ToolTip = "Move selected items higher"
b4.ToolTip = "Move selected items lower"
ctrl1.SetName(accname)
ctrl1.MinSize = ctrl2.MinSize = (150, 100)
if field.get("help"): ctrl1.ToolTip = field["help"]
sizer_b1.Add(b1); sizer_b1.Add(b2)
sizer_b2.Add(b3); sizer_b2.Add(b4)
sizer_l.Add(ctrl1, proportion=1)
sizer_l.Add(sizer_b1, flag=wx.ALIGN_CENTER_VERTICAL)
sizer_l.Add(ctrl2, proportion=1)
sizer_l.Add(sizer_b2, flag=wx.ALIGN_CENTER_VERTICAL)
toplabel = wx.StaticText(parent, label=label, name=accname + "_label")
sizer_f.Add(toplabel, flag=wx.GROW)
            sizer_f.Add(sizer_l, border=10,
    # REM @ider
def _get_all_{tbl}_rowids({self}):
r""" all_{tbl}_rowids <- {tbl}.get_all_rowids()
Returns:
list_ (list): unfiltered {tbl}_rowids
TemplateInfo:
Tider_all_rowids
tbl = {tbl}
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {self}._get_all_{tbl}_rowids()
"""
all_{tbl}_rowids = {self}.{dbself}.get_all_rowids({TABLE})
return all_{tbl}_rowids
# ENDBLOCK
'''
)
# RL IDER ALL ROWID
Tider_rl_dependant_all_rowids = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{root}_{leaf}_all_rowids({self}, {root}_rowid_list, eager=True, nInput=None):
r""" {leaf}_rowid_list <- {root}.{leaf}.all_rowids([{root}_rowid_list])
Gets {leaf} rowids of {root} under the current state configuration.
Args:
{root}_rowid_list (list):
Returns:
list: {leaf}_rowid_list
TemplateInfo:
Tider_rl_dependant_all_rowids
root = {root}
leaf = {leaf}
"""
# FIXME: broken
colnames = ({LEAF_PARENT}_ROWID,)
{leaf}_rowid_list = {self}.{dbself}.get(
{LEAF_TABLE}, colnames, {root}_rowid_list,
id_colname={ROOT}_ROWID, eager=eager, nInput=nInput)
return {leaf}_rowid_list
# ENDBLOCK
''')
#
#
#-----------------
# --- GETTERS ---
#-----------------
# LINES GETTER
Tline_pc_dependant_rowid = ut.codeblock(
r'''
# STARTBLOCK
{child}_rowid_list = {self}.get_{parent}_{child}_rowid({parent}_rowid_list, config2_=config2_, ensure=ensure)
# ENDBLOCK
'''
)
# RL GETTER MULTICOLUMN
Tgetter_rl_pclines_dependant_multicolumn = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{root}_{multicol}({self}, {root}_rowid_list, config2_=None, ensure=True):
r""" {leaf}_rowid_list <- {root}.{leaf}.{multicol}s[{root}_rowid_list]
Get {col} data of the {root} table using the dependant {leaf} table
Args:
{root}_rowid_list (list):
Returns:
list: {col}_list
TemplateInfo:
Tgetter_rl_pclines_dependant_column
root = {root}
col = {col}
leaf = {leaf}
Example:
>>> # DISABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {root}_rowid_list = {self}._get_all_{root}_rowids()
>>> {multicol}_list = {self}.get_{root}_{multicol}({root}_rowid_list, config2_=config2_)
>>> assert len({multicol}_list) == len({root}_rowid_list)
"""
# REM Get leaf rowids
{pc_dependant_rowid_lines}
# REM Get col values
{multicol}_list = {self}.get_{leaf}_{multicol}({leaf}_rowid_list)
return {multicol}_list
# ENDBLOCK
''')
# NATIVE MULTICOLUMN GETTER
# eg. get_chip_sizes
Tgetter_native_multicolumn = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{tbl}_{multicol}({self}, {tbl}_rowid_list, eager=True):
r"""
Returns zipped tuple of information from {multicol} columns
Tgetter_native_multicolumn
Args:
{tbl}_rowid_list (list):
Returns:
list: {multicol}_list
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {tbl}_rowid_list = {self}._get_all_{tbl}_rowids()
>>> ensure = False
>>> {multicol}_list = {self}.get_{tbl}_{multicol}({tbl}_rowid_list, eager=eager)
>>> assert len({tbl}_rowid_list) == len({multicol}_list)
"""
id_iter = {tbl}_rowid_list
colnames = {MULTICOLNAMES}
{multicol}_list = {self}.{dbself}.get({TABLE}, colnames, id_iter, id_colname='rowid', eager=eager)
return {multicol}_list
# ENDBLOCK
''')
# RL GETTER COLUMN
Tgetter_rl_pclines_dependant_column = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{root}_{col}({self}, {root}_rowid_list, config2_=None, ensure=True):
r""" {leaf}_rowid_list <- {root}.{leaf}.{col}s[{root}_rowid_list]
Get {col} data of the {root} table using the dependant {leaf} table
Args:
{root}_rowid_list (list):
Returns:
list: {col}_list
TemplateInfo:
Tgetter_rl_pclines_dependant_column
root = {root}
col = {col}
leaf = {leaf}
"""
# REM Get leaf rowids
{pc_dependant_rowid_lines}
# REM Get col values
{col}_list = {self}.get_{leaf}_{col}({leaf}_rowid_list)
return {col}_list
# ENDBLOCK
''')
# NATIVE COLUMN GETTER
Tgetter_table_column = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{tbl}_{col}({self}, {tbl}_rowid_list, eager=True, nInput=None):
r""" {col}_list <- {tbl}.{col}[{tbl}_rowid_list]
gets data from the "native" column "{col}" in the "{tbl}" table
Args:
{tbl}_rowid_list (list):
Returns:
list: {col}_list
TemplateInfo:
Tgetter_table_column
col = {col}
tbl = {tbl}
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {tbl}_rowid_list = {self}._get_all_{tbl}_rowids()
>>> eager = True
>>> {col}_list = {self}.get_{tbl}_{col}({tbl}_rowid_list, eager=eager)
>>> assert len({tbl}_rowid_list) == len({col}_list)
"""
id_iter = {tbl}_rowid_list
colnames = ({COLNAME},)
{col}_list = {self}.{dbself}.get({TABLE}, colnames, id_iter, id_colname='rowid', eager=eager, nInput=nInput)
return {col}_list
# ENDBLOCK
''')
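# Illustrative sketch (not part of the original code): these templates are
# brace-style format strings; a hypothetical substitution for a "chip" table
# (all keys and names below are made up for illustration) could look like:
#
#   fmtdict = dict(tbl='chip', col='width', TABLE='CHIP_TABLE', COLNAME='CHIP_WIDTH',
#                  self='ibs', dbself='db', autogen_modname='_autogen_chip_funcs',
#                  autogen_key='chip')
#   func_code = Tgetter_table_column.format(**fmtdict)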
Tgetter_extern = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{tbl}_{externcol}({self}, {tbl}_rowid_list, eager=True, nInput=None):
r""" {externcol}_list <- {tbl}.{externcol}[{tbl}_rowid_list]
Args:
{tbl}_rowid_list (list):
Returns:
list: {externcol}_list
TemplateInfo:
Tgetter_extern
tbl = {tbl}
externtbl = {externtbl}
externcol = {externcol}
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {tbl}_rowid_list = {self}._get_all_{tbl}_rowids()
>>> eager = True
>>> {externcol}_list = {self}.get_{tbl}_{externcol}({tbl}_rowid_list, eager=eager)
>>> assert len({tbl}_rowid_list) == len({externcol}_list)
"""
{externtbl}_rowid_list = {self}.get_{tbl}_{externtbl}_rowid({tbl}_rowid_list, eager=eager, nInput=nInput)
{externcol}_list = {self}.get_{externtbl}_{externcol}({externtbl}_rowid_list, eager=eager, nInput=nInput)
return {externcol}_list
# ENDBLOCK
''')
# RL GETTER ROWID
Tgetter_rl_dependant_rowids = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{root}_{leaf}_rowid({self}, {root}_rowid_list, config2_=None, ensure=True, eager=True, nInput=None):
r""" {leaf}_rowid_list <- {root}.{leaf}.rowids[{root}_rowid_list]
Get {leaf} rowids of {root} under the current state configuration.
Args:
{root}_rowid_list (list):
Returns:
list: {leaf}_rowid_list
TemplateInfo:
Tgetter_rl_dependant_rowids
root = {root}
leaf_parent = {leaf_parent}
leaf = {leaf}
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {root}_rowid_list = {self}._get_all_{root}_rowids()
>>> {leaf}_rowid_list1 = {self}.get_{root}_{leaf}_rowid({root}_rowid_list, config2_, ensure=False)
>>> {leaf}_rowid_list2 = {self}.get_{root}_{leaf}_rowid({root}_rowid_list, config2_, ensure=True)
>>> {leaf}_rowid_list3 = {self}.get_{root}_{leaf}_rowid({root}_rowid_list, config2_, ensure=False)
>>> print({leaf}_rowid_list1)
>>> print({leaf}_rowid_list2)
>>> print({leaf}_rowid_list3)
"""
# REM if ensure:
# REM # Ensuring dependant columns is equivalent to adding cleanly
# REM return {self}.add_{root}_{leaf}({root}_rowid_list, config2_=config2_)
# REM else:
# Get leaf_parent rowids
{leaf_parent}_rowid_list = {self}.get_{root}_{leaf_parent}_rowid({root}_rowid_list, config2_=config2_, ensure=ensure)
{leaf}_rowid_list = get_{leaf_parent}_{leaf}_rowid({self}, {leaf_parent}_rowid_list, config2_=config2_, ensure=ensure)
# REM colnames = ({LEAF}_ROWID,)
# REM config_rowid = {self}.get_{leaf}_config_rowid(config2_=config2_)
# REM andwhere_colnames = ({LEAF_PARENT}_ROWID, CONFIG_ROWID,)
# REM params_iter = [({leaf_parent}_rowid, config_rowid,) for {leaf_parent}_rowid in {leaf_parent}_rowid_list]
# REM {leaf}_rowid_list = {self}.{dbself}.get_where2(
# REM {LEAF_TABLE}, colnames, params_iter, andwhere_colnames, eager=eager, nInput=nInput)
return {leaf}_rowid_list
# ENDBLOCK
''')
# PL GETTER ROWID WITHOUT ENSURE
Tgetter_pl_dependant_rowids_ = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{parent}_{leaf}_rowids_({self}, {parent}_rowid_list, config2_=None, eager=True, nInput=None):
r"""
        equivalent to get_{parent}_{leaf}_rowid except ensure is constrained
        to be False.
        Also you save a stack frame because get_{parent}_{leaf}_rowid just
        calls this function if ensure is False.
TemplateInfo:
Tgetter_pl_dependant_rowids_
"""
colnames = ({LEAF}_ROWID,)
config_rowid = {self}.get_{leaf}_config_rowid(config2_=config2_)
andwhere_colnames = ({PARENT}_ROWID, CONFIG_ROWID,)
params_iter = (({parent}_rowid, config_rowid,) for {parent}_rowid in {parent}_rowid_list)
{leaf}_rowid_list = {self}.{dbself}.get_where2(
{LEAF_TABLE}, colnames, params_iter, andwhere_colnames, eager=eager, nInput=nInput)
return {leaf}_rowid_list
# ENDBLOCK
''')
# PL GETTER ROWID WITH ENSURE
Tgetter_pl_dependant_rowids = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{parent}_{leaf}_rowid({self}, {parent}_rowid_list, config2_=None, ensure=True, eager=True, nInput=None, recompute=False):
r""" {leaf}_rowid_list <- {parent}.{leaf}.rowids[{parent}_rowid_list]
get {leaf} rowids of {parent} under the current state configuration
if ensure is True, this function is equivalent to add_{parent}_{leaf}s
Args:
{parent}_rowid_list (list): iterable of rowids
ensure (bool): if True, computes nonexisting information (default=False)
config2_ (QueryParams): configuration for requested property
recompute (bool): if True, recomputed all requested information. (default=False)
eager (bool): experimental - if False return a generator (default=True)
nInput (int): experimental - size hint for input generator (default=None)
Returns:
list: {leaf}_rowid_list
TemplateInfo:
Tgetter_pl_dependant_rowids
parent = {parent}
leaf = {leaf}
python -m ibeis.templates.template_generator --key {leaf} --funcname-filter '\<get_{parent}_{leaf}_rowid\>' --modfname={autogen_modname}
Timeit:
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> # Test to see if there is any overhead to injected vs native functions
>>> %timeit get_{parent}_{leaf}_rowid({self}, {parent}_rowid_list)
>>> %timeit {self}.get_{parent}_{leaf}_rowid({parent}_rowid_list)
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {parent}_rowid_list = {self}._get_all_{parent}_rowids()
>>> ensure = False
>>> {leaf}_rowid_list = {self}.get_{parent}_{leaf}_rowid({parent}_rowid_list, config2_, ensure)
>>> assert len({leaf}_rowid_list) == len({parent}_rowid_list)
"""
if recompute:
# get existing rowids, delete them, recompute the request
{leaf}_rowid_list = get_{parent}_{leaf}_rowids_(
{self}, {parent}_rowid_list, config2_=config2_, eager=eager, nInput=nInput)
delete_{leaf}({self}, {leaf}_rowid_list, config2_=config2_)
{leaf}_rowid_list = add_{parent}_{leaf}({self}, {parent}_rowid_list, config2_=config2_)
elif ensure:
{leaf}_rowid_list = add_{parent}_{leaf}({self}, {parent}_rowid_list, config2_=config2_)
else:
{leaf}_rowid_list = get_{parent}_{leaf}_rowids_(
{self}, {parent}_rowid_list, config2_=config2_, eager=eager, nInput=nInput)
return {leaf}_rowid_list
# ENDBLOCK
''')
# NATIVE FROMSUPERKEY ROWID GETTER
#id_iter = (({tbl}_rowid,) for {tbl}_rowid in {tbl}_rowid_list)
Tgetter_native_rowid_from_superkey = ut.codeblock(
r'''
# STARTBLOCK
# REM @getter
def get_{tbl}_rowid_from_superkey({self}, {superkey_args}, eager=True, nInput=None):
r""" {tbl}_rowid_list <- {tbl}[{superkey_args}]
Args:
superkey lists: {superkey_args}
Returns:
{tbl}_rowid_list
TemplateInfo:
Tgetter_native_rowid_from_superkey
tbl = {tbl}
"""
colnames = ({TBL}_ROWID,)
# FIXME: col_rowid is not correct
params_iter = zip({superkey_args})
andwhere_colnames = [{superkey_COLNAMES}]
{tbl}_rowid_list = {self}.{dbself}.get_where2(
{TABLE}, colnames, params_iter, andwhere_colnames, eager=eager, nInput=nInput)
return {tbl}_rowid_list
# ENDBLOCK
''')
#
#
#-----------------
# --- SETTERS ---
#-----------------
# NATIVE COL SETTER
Tsetter_native_column = ut.codeblock(
r'''
# STARTBLOCK
# REM @setter
def set_{tbl}_{col}({self}, {tbl}_rowid_list, {col}_list, duplicate_behavior='error'):
r""" {col}_list -> {tbl}.{col}[{tbl}_rowid_list]
Args:
{tbl}_rowid_list
{col}_list
TemplateInfo:
Tsetter_native_column
tbl = {tbl}
col = {col}
"""
id_iter = {tbl}_rowid_list
colnames = ({COLNAME},)
{self}.{dbself}.set({TABLE}, colnames, {col}_list, id_iter, duplicate_behavior=duplicate_behavior)
# ENDBLOCK
''')
Tsetter_native_multicolumn = ut.codeblock(
r'''
# STARTBLOCK
def set_{tbl}_{multicol}({self}, {tbl}_rowid_list, {multicol}_list, duplicate_behavior='error'):
r""" {multicol}_list -> {tbl}.{multicol}[{tbl}_rowid_list]
Tsetter_native_multicolumn
Args:
{tbl}_rowid_list (list):
Example:
>>> # ENABLE_DOCTEST
>>> from {autogen_modname} import * # NOQA
>>> {self}, config2_ = testdata_{autogen_key}()
>>> {multicol}_list = get_{tbl}_{multicol}({self}, {tbl}_rowid_list)
"""
id_iter = {tbl}_rowid_list
colnames = {MULTICOLNAMES}
{self}.{dbself}.set({TABLE}, colnames, {multicol}_list, id_iter, duplicate_behavior=duplicate_behavior)
# ENDBLOCK
''')
#
#
#-------------------------------
# --- UNFINISHED AND DEFERRED ---
#-------------------------------
Tdeleter_table1_relation = ut.codeblock(
r'''
# STARTBLOCK
# REM @deleter
def delete_{tbl1}_{relation_tbl}_relation({self}, {tbl1}_rowid_list):
r"""
Deletes the relationship between an {tbl1} and {tbl2}
TemplateInfo:
Tdeleter_relationship
tbl = {relation_tbl}
"""
{relation_tbl}_rowids_list = {self}.get_{tbl1}_{relation_tbl}_rowid({tbl1}_rowid_list)
{relation_tbl}_rowid_list = ut.flatten({relation_tbl}_rowids_list)
{self}.{dbself}.delete_rowids({RELATION_TABLE}, {relation_tbl}_rowid_list)
# ENDBLOCK
'''
)
Tgetter_table1_rowids = ut.codeblock(
r'''
# STARTBLOCK
# REM @deleter
def get_{tbl1}_{relation_tbl}_rowid({self}, {tbl1}_rowid_list):
r"""