node))
val = 1 - self.agent.goal_f(self.condition, state)
for i in range(len(path)):
if path[i][1] is not None:
path[i][1].update_value(val)
path[i][0].success = max(val, path[i][0].success)
path[i][0].discount = 0.9**(len(path)-i-1)
path = [step[0] for step in path]
if len(path) and val > 1. - 1e-3:
print(('\nSUCCESS! Tree {0} {1} using ff solve\n'.format(self.log_file, state)))
self.agent.add_task_paths([path])
else:
print('FAILED with ff solve')
print(('FF out:', task_path, 'Ran:', [step.task for step in path], 'End state:', state, 'Targets:', targets, 'Init state', init_state))
self.opt_strength = old_opt_strength
self.val_per_run.append(val)
self.log_path(path, -1)
return val, path
def get_path_info(self, x0, node, task, traj):
return [x0, node, task, traj]
def simulate(self, state, use_distilled=False, early_stop_prob=0.0, fixed_paths=[], debug=False):
current_node = self.root
path = []
samples = []
self.n_runs += 1
success = True
cur_state = state.copy()
prev_sample = None
terminated = False
iteration = 0
exclude_hl = []
path_value = 0. # None
next_sample = None
self.agent.reset_to_state(state)
if np.random.uniform() < self.ff_thresh:
val, path, _ = self.eval_pr_graph(state)
return val, path
while True:
if debug:
print("Taking simulation step")
if self.agent.goal_f(self.condition, cur_state) == 0: # or current_node.depth >= self.max_depth:
if not iteration:
print((state, self.agent.targets[self.condition]))
print('WARNING: Should not succeed without sampling')
break
if len(fixed_paths) <= iteration:
value, next_node, next_sample = self._choose_next(cur_state, current_node, prev_sample, exclude_hl, use_distilled, debug=debug)
else:
path_info = fixed_paths[iteration]
value, next_node, next_sample = self._simulate_fixed_path(*path_info)
# if np.random.uniform() > 0.9 and current_node is self.root: print(next_sample.get(STATE_ENUM))
path_value = np.maximum(value, path_value)
self.node_history[tuple(cur_state)] = current_node
if next_node is None or next_sample is None or next_node.depth > self.max_depth:
break
# if len(fixed_paths) <= iteration and np.random.uniform() > 0.9:
# print(next_sample.get_X(), '<---- sampled path in tree')
next_sample.node = next_node.parent
next_node.sample_links[next_sample] = prev_sample # Used to retrace paths
prev_sample = next_sample
cur_state = next_sample.end_state # get_X(t=next_sample.T-1)
current_node = next_node
# exclude_hl += [self._encode_f(cur_state, plan, self.agent.targets[self.condition])]
iteration += 1
# if np.random.uniform() < early_stop_prob:
# break
if path_value == 0:
path_value = 1 - self.agent.goal_f(self.condition, cur_state)
end_sample = next_sample
path = []
while current_node is not self.root and prev_sample in current_node.sample_links:
path.append(prev_sample)
cur_state = prev_sample.end_state # get_X(t=prev_sample.T-1)
path_value = np.maximum(path_value, 1-self.agent.goal_f(self.condition, cur_state))
prev_sample.task_cost = 1-path_value
prev_sample.success = path_value
prev_sample = current_node.sample_links[prev_sample]
current_node.sample_links = {}
current_node.update_value(path_value)
current_node = current_node.parent
path.reverse()
if end_sample is not None:
if end_sample not in path:
path.append(end_sample)
end_sample.success = path_value
end_sample.task_cost = 1. - path_value
n = end_sample
while hasattr(n, 'next_sample') and n.next_sample is not None:
next_n = n.next_sample
n.next_sample = None
n = next_n
n.success = path_value
n.task_cost = 1. - path_value
path.append(n)
self.val_per_run.append(path_value)
if len(path) and path_value > 1. - 1e-3:
self.n_success += 1
self.first_success = np.minimum(self.first_success, self.n_runs)
end = path[-1]
print(('\nSUCCESS! Tree {0} {1} using fixed: {2} {3}\n'.format(self.log_file, state, len(fixed_paths) != 0, self.n_runs)))
for s in path:
s.prim_use_ts *= MCTS_WEIGHT
self.agent.add_task_paths([path])
elif len(path) and self.her:
old_nodes = [path[i].node for i in range(len(path))]
for s in path:
s.node = None
s.next_sample = None
new_path = self.agent.relabel_path(path)
self.agent.add_task_paths([new_path])
for i in range(len(path)):
path[i].node = old_nodes[i]
self.log_path(path, len(fixed_paths))
for n in range(len(path)):
path[n].discount = 0.9**(len(path)-n-1)
if self.bad_tree:
print(('Bad tree for state', state))
self.reset()
if len(path): path[-1].done = 1
return path_value, path
def simulate_from_next(self, node, state, prev_sample, num_samples=1, save=False, exclude_hl=[], use_distilled=True, debug=False):
if debug:
print("Running simulate from next")
label = node.label
value, samples = self._default_simulate_from_next(label, node.depth, node.depth, state, self.prob_func, [], num_samples, save, exclude_hl, use_distilled, [], debug=debug)
if len(samples):
pass
# node.sample_links[samples[0]] = prev_sample
else:
samples.append(None)
for i in range(len(samples) - 1):
samples[i].next_sample = samples[i+1]
return value, samples[0]
def rollout_with_postcond(self, state, targets, max_t=10, task_ts=20, soft=False, eta=None, mode=''):
prev_tasks = []
cur_run = [0]
def task_f(s, t, curtask):
next_task = self.run_hl(s, t, curtask, targets, check_cost=False)
if len(prev_tasks) and tuple(next_task) != tuple(prev_tasks[-1]):
s.targets = targets
postcost = self.agent.postcond_cost(s, prev_tasks[-1], t)
if postcost > 0:
next_task = prev_tasks[-1]
if len(prev_tasks) and tuple(next_task) == tuple(prev_tasks[-1]):
cur_run.append(cur_run[-1]+1)
else:
cur_run.append(0)
prev_tasks.append(next_task)
return next_task
self.agent.reset_to_state(state)
old_opt = self.opt_strength
path = []
val = 0
t = 0
old_soft = self._soft
self._soft = soft
old_eta = self.eta
if eta is not None: self.eta = eta
l = list(self.agent.plans.keys())[0]
l = self.iter_labels(state, l, targets=targets, debug=False, check_cost=False)
s, t = 0, 0
col_s, col_ts = -1, -1
while t < max_t and val < 1-1e-2 and l is not None:
l = self.iter_labels(state, l, targets=targets, debug=False, check_cost=False)
if l is None: break
task_name = self.tasks[l[0]]
pol = self.rollout_policy[task_name]
plan = self.agent.plans[l]
s = self.agent.sample_task(pol, self.condition, state, l, task_f=task_f, skip_opt=True)
val = 1 - self.agent.goal_f(0, s.get_X(s.T-1), targets)
state = s.end_state # s.get_X(s.T-1)
path.append(s)
if mode == 'collision' and 1 in s.col_ts:
col_s = t
col_ts = s.col_ts.tolist().index(1)
t += 1
if cur_run[-1] >= task_ts:
break
if col_ts >= 0:
task = tuple(path[col_ts].get(FACTOREDTASK_ENUM, t=col_ts))
ts = col_ts - 2
if ts < 0:
col_s -= 1
if col_s < 0:
col_s, col_ts = 0, 0
else:
ts = path[col_s].T + ts - 1
x = path[col_s].get_X(t=ts)
plan = self.agent.plans[task]
success = self.agent.backtrack_solve(plan, x0=x)
if success:
new_samples = self.agent.run_plan(plan, targets, record=False, save=False)
for s in new_samples:
self.optimal_samples[self.agent.task_list[task[0]]].append(s)
print('OPT on collision in rollout:', success, task, x)
if val < 1-1e-3:
last_task = tuple(path[-1].get(FACTOREDTASK_ENUM, t=path[-1].T-1))
t = len(prev_tasks)-1
while t >= 0 and tuple(last_task) == tuple(prev_tasks[t]):
t -= 1
ind = 0
while t >= path[ind].T:
ind += 1
t -= path[ind].T
s, t = ind, t
self.opt_strength = old_opt
self.eta = old_eta
self.log_path(path, -50)
self._soft = old_soft
# (s, t) indexes the switch where it failed postconditions
return val, path, s, t
def test_run(self, state, targets, max_t=20, hl=False, soft=False, check_cost=True, eta=None):
self.agent.reset_to_state(state)
old_opt = self.opt_strength
# self.opt_strength = 1.
path = []
val = 0
l = (0,0,0,0)
t = 0
old_soft = self._soft
self._soft = soft
old_eta = self.eta
if eta is not None: self.eta = eta
debug = np.random.uniform() < 0.1
while t < max_t and val < 1-1e-2 and l is not None:
l = self.iter_labels(state, l, targets=targets, debug=debug, check_cost=check_cost)
if l is None: break
plan = self.agent.plans[l]
s, _ = self.sample(l, state, plan, 1, hl=hl, hl_check=check_cost, save=False, skip_opt=True)
val = 1 - self.agent.goal_f(0, s.get_X(s.T-1), targets)
t += 1
state = s.end_state # s.get_X(s.T-1)
path.append(s)
self.opt_strength = old_opt
self.eta = old_eta
self.log_path(path, -5)
self._soft = old_soft
return val, path
def get_bad_labels(self, state, debug=False):
labels = [l for l in self.label_options]
bad = []
for l in labels:
cost = self.agent.cost_f(state, l, self.condition, active_ts=(0,0), debug=debug)
if cost > 1e-3:
bad.append(l)
return bad
def run_hl(self, sample, t=0, task=None, targets=None, check_cost=False, debug=False):
next_label, distr = self.eval_hl(sample, t, targets, debug, True)
# if t== 0:
# distrs = self.prob_func(sample.get_prim_obs(t=t), False, eta=1.)
# print(distrs, sample.get(STATE_ENUM, t=t), sample.get(ONEHOT_GOAL_ENUM, t=t))
if not check_cost: return next_label
return self.iter_distr(next_label, distr, self.label_options, sample.get_X(t), sample)
def eval_hl(self, sample, t=0, targets=None, debug=False, find_distr=False):
labels = [l for l in self.label_options]
if self.use_q:
obs = sample.get_val_obs(t=t)
opts = self.agent.prob.get_prim_choices(self.agent.task_list)
distr = np.zeros(len(labels))
dact = np.sum([len(opts[e]) for e in opts])
for i in range(len(labels)):
l = labels[i]
act = np.zeros(dact)
cur_ind = 0
for j, e in enumerate(opts):
act[cur_ind + l[j]] = 1.
cur_ind += len(opts[e])
distr[i:i+1] = self.value_func(obs, act)
if self._soft:
exp_wt = np.exp(self.eta*(distr - np.max(distr)))
wt = exp_wt / np.sum(exp_wt)
ind = np.random.choice(list(range(len(labels))), p=wt)
else:
ind = np.argmax(distr)
next_label = tuple(labels[ind])
elif self.discrete_prim:
task = sample.task if hasattr(sample, 'task') else None
distrs = self.prob_func(sample.get_prim_obs(t=t), self._soft, eta=self.eta, t=t, task=task)
for d in distrs:
for i in range(len(d)):
d[i] = round(d[i], 5)
if self.onehot_task:
distr = distrs[0]
val = np.max(distr)
ind = np.random.choice([i for i in range(len(distr)) if distr[i] >= val])
next_label = self.agent.task_to_onehot[ind]
else:
distr = [np.prod([distrs[i][l[i]] for i in range(len(l))]) for l in labels]
distr = np.array(distr)
ind = []
for d in distrs:
val = np.max(d)
ind.append(np.random.choice([i for i in range(len(d)) if d[i] >= val]))
next_label = tuple(ind) # tuple([ind[d] for d in range(len(distrs))])
if find_distr: return tuple(next_label), distr
return tuple(next_label)
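# The `_soft` branch above is a temperature-scaled softmax (Boltzmann) selection
# over the candidate labels. Standalone sketch of that step (illustrative, not a
# method of this class; `scores` stands in for `distr` and `eta` for self.eta):
def _soft_select(scores, eta):
    scores = np.asarray(scores, dtype=float)
    exp_wt = np.exp(eta * (scores - np.max(scores)))  # subtract the max for numerical stability
    wt = exp_wt / np.sum(exp_wt)
    return np.random.choice(len(scores), p=wt)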
def iter_distr(self, next_label, distr, labels, end_state, sample, debug=False):
cost = self.agent.cost_f(end_state, next_label, self.condition, active_ts=(0,0), debug=debug)
post = 1. # self.agent.cost_f(end_state, next_label, self.condition, active_ts=(sample.T-1,sample.T-1), debug=debug)
init_label = next_label
T = self.agent.plans[next_label].horizon - 1
while (cost > 0 or post < 1e-3) and np.any(distr
the array of expected galaxy counts per bin.
SN_dist -- comoving distance to SN along line of sight.
OM -- matter density parameter. Defaults to 0.27.
"""
H0 = 1000 * 100 * h # km/s/Gpc
coeff = 3.0 * H0 ** 2 * OM / (2.0 * c ** 2)
sf_arr = 1.0 / (1.0 + zs)
k_i = coeff * chis * chi_widths * (SN_dist - chis) / SN_dist / expected_arr / sf_arr
return np.sum(k_i)
def general_convergence(chi_widths, chis, zs, d_arr, SN_dist, OM=0.27, h=0.738):
"""Calculates convergence from an array of overdesnities for all bins along line of sight.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
d_arr -- overdensity array.
SN_dist -- comoving distance to SN along line of sight.
OM -- matter density parameter. Defaults to 0.27.
"""
H0 = 1000 * 100 * h # km/s/Gpc
coeff = 3.0 * H0 ** 2 * OM / (2.0 * c ** 2)
sf_arr = 1.0 / (1.0 + zs)
k_i = coeff * chis * chi_widths * (SN_dist - chis) / SN_dist * d_arr / sf_arr
return np.sum(k_i), k_i
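# Illustrative usage of general_convergence (not part of the original module); the
# bin construction and the z(chi) relation below are toy stand-ins, and chi is taken
# in Gpc to match the km/s/Gpc value of H0 used above:
def _example_general_convergence(chi_SN=3.0, n_bins=50):
    edges = np.linspace(0.0, chi_SN, n_bins + 1)
    chis = 0.5 * (edges[1:] + edges[:-1])      # mean comoving distance of each bin
    chi_widths = np.diff(edges)                # comoving width of each bin
    zs = chis / 4.0                            # crude placeholder for z(chi)
    d_arr = np.zeros(n_bins)
    d_arr[n_bins // 2] = 1.0                   # a single overdense bin at the midpoint
    kappa, k_i = general_convergence(chi_widths, chis, zs, d_arr, chi_SN)
    return kappa, k_i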
def calc_single_d(chi_widths, chis, zs, z_widths, z_SN, use_chi=True):
"""Uses single_m_convergence with index starting at 0 and going along the entire line of sight.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
z_SN -- the reshift of the SN.
use_chi -- boolean that determined whether equal comoving distance or redshift bins are used.
"""
comoving_to_SN = b_comoving(0, z_SN)
chi_SN = comoving_to_SN[-1]
convergence = np.linspace(0, 0, len(chis))
mass = MSOL * 10 ** 15
for i in range(0, len(chis)):
if use_chi:
convergence[i] = single_d_convergence(chi_widths, chis, zs, i, 1, chi_SN)
else:
convergence[i] = single_d_convergence_z(z_widths, chis, zs, i, 1, chi_SN)
return convergence
def plot_smoothed_d(chi_widths, chis, zs, z_SN):
"""Plots general_convergence for overdensities that are increasingly smoothed over the line of sight.
Also plots the case where the overdensity along the entire line of sight is 0.
Inputs:
chi_widths -- the width of the comoving distance bins.
chis -- the mean comoving distances of each bin.
zs -- the mean redshift of each bin, for the scale factor.
z_SN -- the redshift of the SN.
"""
comoving_to_SN = b_comoving(0, z_SN)
chi_SN = comoving_to_SN[-1]
size = 2 * len(zs) // 2 + 1
delta = np.zeros((size, len(zs)))
delta1 = 1
correction = delta1 / len(zs)
delta[0][int(len(zs) // 2):int(len(zs) // 2) + 1] = delta1
delta[-1][int(len(zs) // 2):int(len(zs) // 2) + 1] = -delta1
for i, s in enumerate(np.arange(1, len(zs) // 2 + 1, 1)):
delta[s][int(len(zs) // 2) - s:int(len(zs) // 2) + s + 1] = delta1 / (2 * s + 1)
delta[-s - 1][int(len(zs) // 2) - s:int(len(zs) // 2) + s + 1] = -delta1 / (2 * s + 1)
convergence = np.zeros(size)
convergence_cor = np.zeros(size)
delta_cor = np.zeros((size, len(zs)))
delta_cor[0:size // 2] = delta[0:size // 2] - correction
delta_cor[size // 2:] = delta[size // 2:] + correction
for j in range(size):
convergence[j], _ = general_convergence(chi_widths, chis, zs, delta[j], chi_SN)
convergence_cor[j], _ = general_convergence(chi_widths, chis, zs, delta_cor[j], chi_SN)
# convergence = np.delete(convergence, size // 2, 0)
convergence_cor = np.delete(convergence_cor, size // 2, 0)
plt.plot([size // 2, size // 2], [min(convergence) - 0.0003, max(convergence) + 0.0003],
color=[0.75, 0.75, 0.75], linestyle='--')
plt.plot([0, size - 1], [0, 0], color=[0.75, 0.75, 0.75], linestyle='--')
plt.plot(range(1, size // 2 + 1), convergence[:size // 2], label=f'Total $|\delta|$ = 1', color=colours[0], lw=2)
plt.plot(range(size // 2, size), convergence[size // 2:], color=colours[0], lw=2)
plt.plot(range(1, size), convergence_cor, label=f'Total $|\delta|$ = 0', color=colours[1], lw=2)
plt.text(37.5, 0.00045, '$\leftarrow$ $\delta$ > 0', fontsize=20, ha='center')
plt.text(62.5, 0.00045, '$\delta$ < 0 $\\rightarrow$', fontsize=20, ha='center')
plt.xlabel("Number of bins smoothed over")
plt.ylabel(" $\kappa \\times 10^{-3}$")
# plt.title(f"Convergence as a function of central overdensity smoothing (z$_S$$_N$ = {z_SN})")
plt.legend(frameon=0)
plt.xticks([0, 12.5, 25, 37.5, 50, 62.5, 75, 87.5, 100], [0, 25, 50, 75, 100, 75, 50, 25, 0])
plt.yticks([-0.001, -0.0005, 0.0, 0.0005, 0.001], [-1.0, -0.5, 0.0, 0.5, 1.0])
plt.axis([0, size, min(convergence) - 0.0003, max(convergence) + 0.0003])
plt.tight_layout()
plt.show()
for array in delta[:len(delta_cor) // 2]:
plt.bar(zs, array, width=chi_widths[0], edgecolor='w', lw=1.5)
plt.xlabel("$z$", fontsize=20)
plt.ylabel("$\delta_i$", fontsize=20)
plt.tick_params(labelsize=16)
# plt.plot([0, 0.6], [0, 0], color='k', linestyle='-')
plt.xlim([0, 0.6])
plt.ylim([0, 1])
plt.tight_layout()
plt.show()
def compare_z_chi(conv_c_arr, conv_z_arr, chi_bins_c, chi_bins_z, z_bins_z, z_bins_c, SN_dist, z_SN):
"""Plots the convergence distribution for even chi and z, over chi or z.
Inputs:
conv_c_arr -- array of convergence for even comoving bins.
conv_z_arr -- array of convergence for even redshift bins.
chi_bins_c -- mean comoving distance values of the equal comoving bins.
chi_bins_z -- mean comoving distance values of the equal redshift bins.
z_bins_c -- mean redshift values of the equal comoving bins.
z_bins_z -- mean redshift values of the equal redshift bins.
SN_dist -- comoving distance to SN along line of sight.
z_SN -- the redshift of the SN.
"""
plt.plot([SN_dist / 2, SN_dist / 2], [0, 1.1 * max(conv_c_arr)], linestyle='--', color=[0.75, 0.75, 0.75],
linewidth=1)
chi_peak_c = np.array(chi_bins_c)[np.argmin(np.abs(conv_c_arr - max(conv_c_arr)))]
chi_peak_z = np.array(chi_bins_z)[np.argmin(np.abs(conv_z_arr - max(conv_z_arr)))]
plt.plot(chi_bins_c, 1000 * conv_c_arr, label='Equal $\chi$', color=colours[0], lw=2)
plt.plot(chi_peak_c, 1000 * max(conv_c_arr), marker='x', color=colours[0], ms=8)
plt.text((chi_peak_z + chi_peak_c) / 2, 1000 * max(conv_c_arr) * 3.5 / 5, f'$\chi$ = {round(chi_peak_c, 2)} Gpc',
fontsize=20, ha='center', color=colours[0])
plt.plot(chi_bins_z, 1000 * conv_z_arr, label='Equal $z$', color=colours[1], lw=2)
plt.plot(chi_peak_z, 1000 * max(conv_z_arr), marker='x', color=colours[1], ms=8)
plt.text((chi_peak_z + chi_peak_c) / 2, 1000 * max(conv_c_arr) * 3 / 5, f'$\chi$ = {round(chi_peak_z, 2)} Gpc',
fontsize=20, ha='center', color=colours[1])
plt.xlabel("$\chi_L$ (Gpc)")
plt.ylabel("$\kappa\\times 10^{-3}$")
plt.legend(frameon=0, loc='upper left')
plt.axis([0, SN_dist, 0, 1100 * max(conv_c_arr)])
plt.tight_layout()
plt.show()
plt.plot([z_SN / 2, z_SN / 2], [0, 1.1 * max(conv_c_arr)], linestyle='--',
color=[0.75, 0.75, 0.75], linewidth=1)
z_peak_c = np.array(z_bins_c)[np.argmin(np.abs(conv_c_arr - max(conv_c_arr)))]
z_peak_z = np.array(z_bins_z)[np.argmin(np.abs(conv_z_arr - max(conv_z_arr)))]
plt.plot(z_bins_c, 1000 * conv_c_arr, label='Equal $\chi$', color=colours[0], lw=2)
plt.plot(z_peak_c, 1000 * max(conv_c_arr), marker='x', color=colours[0], ms=8)
plt.text((z_peak_z + z_peak_c) / 2, 1000 * max(conv_z_arr) * 3.5 / 5, f'$z$ = {round(z_peak_c, 2)}',
fontsize=20, ha='center', color=colours[0])
plt.plot(z_bins_z, 1000 * conv_z_arr, label='Equal $z$', color=colours[1], lw=2)
plt.plot(z_peak_z, 1000 * max(conv_z_arr), marker='x', color=colours[1], ms=8)
plt.text((z_peak_z + z_peak_c) / 2, 1000 * max(conv_z_arr) * 3 / 5, f'$z$ = {round(z_peak_z, 2)}',
fontsize=20, ha='center', color=colours[1])
plt.xlabel("$z_L$")
plt.ylabel("$\kappa\\times 10^{-3}$")
plt.legend(frameon=0, loc='upper right')
plt.axis([0, z_SN, 0, 1100 * max(conv_c_arr)])
plt.tight_layout()
plt.show()
def smoothed_m_convergence(tests, SN_dist, z_SN, OM=0.27, h=0.738):
"""Plots the convergence of a single mass confined to the centre of the LOS with decreasing bin width.
Inputs:
tests -- number of bin widths.
SN_dist -- comoving distance to supernova.
z_SN -- redshift of supernova.
OM -- matter density parameter. Defaults to 0.27.
"""
H0 = 1000 * 100 * h # km/s/Gpc
test_range = np.arange(3, tests, 4)
conv1 = np.zeros(len(test_range))
conv2 = np.zeros(len(test_range))
mass_mag = 15
mass = MSOL * 10 ** mass_mag
bin_lengths = np.zeros(len(test_range))
for num, y in enumerate(test_range):
(comoving_binwidths, comoving_bins, z_bins, z_widths) = create_chi_bins(0, z_SN, y + 1)
cone_rad = comoving_bins[len(z_bins) // 2] / (1 + z_bins[len(z_bins) // 2]) * 0.00349066
mid_value = len(z_bins) // 2
# print(y, mid_value)
theta = np.deg2rad(12.0 / 60.0)
# distance * 12 arcmin = 0.00349066 rad
vol_bin = (comoving_binwidths[0] * (1 + z_bins[len(z_bins) // 2])) * np.pi * cone_rad ** 2
# vol_bin = 2.0 / 3.0 * np.pi * (1 - np.cos(theta)) * (comoving_binwidths[mid_value]) / (1 + z_bins[mid_value])
Hz = get_h_inv(z_bins[mid_value]) ** (-1) * H0
# rho = mass / vol_bin
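# The two overdensities below implement delta = rho / rho_bar - 1, with the mean
# matter density rho_bar(z) = 3 * OM * H(z)^2 / (8 * pi * G); the trailing 3.086E31
# factor is taken to be the unit conversion between the Gpc-based H(z) and SI
# quantities (assumed here, not re-derived).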
d_m1 = 8 * np.pi * G * mass / (3 * OM * vol_bin * Hz ** 2 * 3.086E31) - 1
d_m2 = 8 * np.pi * G * mass/1000.0 / (3 * OM * vol_bin * Hz ** 2 * 3.086E31) - 1
conv1[num] = single_d_convergence(comoving_binwidths, comoving_bins, z_bins, mid_value, d_m1, SN_dist)
conv2[num] = single_d_convergence(comoving_binwidths, comoving_bins, z_bins, mid_value, d_m2, SN_dist)
bin_lengths[num] = round(1000 * comoving_binwidths[0], 1)
shutil.rmtree('./index', ignore_errors=True)
os.mkdir('./index')
genoIndex = '%s/index/%s' % (os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Can be merged with genoIndex from earlier part if we use bowtie2 earlier
# genoIndex = './index/%s' % (fastaclean.rpartition('/')[-1].rpartition('.')[0]) ## Alternative approach -Can be merged with genoIndex from earlier part if we use bowtie2 earlier
print('Creating index of cDNA/genomic sequences:%s**\n' % (genoIndex))
adcv = "256"
divn = "6"
### Run based on input about the memory
# if args.lowmem:
# retcode = subprocess.call(["bowtie-build","-f", fastaclean, genoIndex])
# else:
retcode = subprocess.call(["bowtie-build","-f", "--noauto", "--dcv", adcv,"--bmaxdivn", divn, fastaclean, genoIndex])
if retcode == 0: ## The bowtie-build call exited with status 0, all is well
# print("Reference index prepared successfully")
pass
else:
print("There is some problem preparing index of reference '%s'" % (reference))
print("Is 'Bowtie' installed? And added to environment variable?")
print("Script will exit now")
sys.exit()
##########################################
### Test for index files #################
# ## Close this code if not testing
# fh_in1 = open("./index/Triticum_aestivum.TGACv1.dna.toplevel.clean.1.ebwtl",'w')
# fh_in1.write("Atul is a developer for PHASIS")
# fh_in1.close()
##########################################
### Make a memory file ###################
fh_out = open(memFile,'w')
print("Generating MD5 hash for reference")
refHash = (hashlib.md5(open('%s' % (reference),'rb').read()).hexdigest()) ### reference hash used instead of cleaned FASTA because while comparing only the user input reference is available
print("Generating MD5 hash for Bowtie index")
if os.path.isfile("%s.1.ebwtl" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwtl' % (genoIndex),'rb').read()).hexdigest())
elif os.path.isfile("%s.1.ebwt" % (genoIndex)):
indexHash = (hashlib.md5(open('%s.1.ebwt' % (genoIndex),'rb').read()).hexdigest())
else:
print("File extension for index couldn't be determined properly")
print("It could be an issue from Bowtie")
print("This needs to be reported to 'PHASIS' developer - Script will exit")
sys.exit()
print("\n@genomehash:%s | @indexhash:%s" % (refHash, indexHash) )
fh_out.write("@timestamp:%s\n" % (datetime.datetime.now().strftime("%m_%d_%H_%M")))
fh_out.write("@genomehash:%s\n" % (refHash))
fh_out.write("@index:%s\n" % (genoIndex))
fh_out.write("@indexhash:%s\n" % (indexHash))
print("Index prepared:%s\n" % (genoIndex))
# sys.exit()
return genoIndex
def indexIntegrityCheck(index):
'''
Checks the integrity of index and the extension
'''
indexFolder = index.rpartition("/")[0]
# print("This is the folder from earlier run:%s" % (indexFolder))
if os.path.isfile("%s.1.ebwtl" % (index)): ## Check if this extension exists in folder
indexExt = "ebwtl"
indexFiles = [i for i in os.listdir('%s' % (indexFolder)) if i.endswith('.ebwtl')]
if len(indexFiles) >= 6:
# print("Index has all six parts")
indexIntegrity = True
elif os.path.isfile("%s.1.ebwt" % (index)):
indexExt = "ebwt"
indexFiles = [i for i in os.listdir('%s' % (indexFolder)) if i.endswith('.ebwt')]
if len(indexFiles) >= 6:
# print("Index has all six parts")
indexIntegrity = True
else:
print("Existing index extension couldn't be determined")
print("Genome index will be remade")
indexExt = False
indexIntegrity = False
print("Ancillary data integrity :",indexIntegrity)
# print("Number of files:%s" % (len(indexFiles)))
return indexIntegrity,indexExt
def FASTAClean(filename,mode):
'''Cleans FASTA file - multi-line fasta to single line, header clean, empty lines removal'''
## Read sequence file
fh_in = open(filename, 'r')
print ("phasdetect uses FASTA header as key for identifying the phased loci")
print ("Caching '%s' reference FASTA file" % (filename))
## Write file
if mode == 0:
fastaclean = ('%s/%s.clean.fa' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0])) ## os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]
else:
print("Input correct mode- 0: Normal | 1: Seqeunces reversed | 2: Seqeunces reverse complemented | 3: Seqeunces complemented only")
print("USAGE: cleanFasta.v.x.x.py FASTAFILE MODE")
sys.exit()
### Outfiles
fh_out1 = open(fastaclean, 'w')
fastasumm = ('%s/%s.summ.txt' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0]))
fh_out2 = open(fastasumm, 'w')
fh_out2.write("Name\tLen\n")
### Read files
fasta = fh_in.read()
fasta_splt = fasta.split('>')
fastaD = {} ## Store FASTA as dict
acount = 0 ## count the number of entries
empty_count = 0
for i in fasta_splt[1:]:
ent = i.split('\n')
aname = ent[0].split()[0].strip()
if runType == 'G':
## To match with phasing-core script for genome version which removed non-numeric and preceding 0s
name = re.sub("[^0-9]", "", aname).lstrip('0')
else:
name = aname
seq = ''.join(x.strip() for x in ent[1:]) ## Sequence in multiple lines
alen = len(seq)
if alen > 200:
fh_out1.write('>%s\n%s\n' % (name,seq))
fh_out2.write('%s\t%s\n' % (name,alen))
acount+=1
else:
empty_count+=1
pass
#### Prepare a dictionary - Not Tested
# for line in fh_in:
# if line.startswith('>'):
# name = line[1:].rstrip('\n').split()[0]
# fastaD[name] = ''
# else:
# fastaD[name] += line.rstrip('\n').rstrip('*')
#### Write results - Not tested
# for name,seq in fastaD.items():
# alen = len(seq)
# if alen > 200:
# fh_out1.write('>%s\n%s\n' % (name,seq))
# fh_out2.write('%s\t%s\n' % (name,alen))
# acount+=1
# else:
# empty_count+=1
# pass
fh_in.close()
fh_out1.close()
fh_out2.close()
print("Fasta file with reduced header: '%s' with total entries %s is prepared" % (fastaclean, acount))
print("There were %s entries found with empty sequences and were removed\n" % (empty_count))
return fastaclean,fastasumm
def readMem(memFile):
'''
Reads memory file and gives global variables
'''
print ("#### Fn: memReader ############################")
fh_in = open(memFile,'r')
memRead = fh_in.readlines()
fh_in.close()
memflag = True
varcount = 0
for line in memRead:
if line: ## Not empty
if line.startswith('@'):
line = line.strip("\n")
# print(line)
akey,aval = line.split(':')
param = akey.strip()
value = aval.strip()
# print(param,value)
if param == '@genomehash':
global existRefHash
varcount+=1
existRefHash = str(value)
print('Existing reference hash :',existRefHash)
elif param == '@indexhash':
global existIndexHash
varcount+=1
existIndexHash = str(value)
print('Existing index hash :',existIndexHash)
elif param == '@index':
global index
varcount+=1
index = str(value)
print('Existing index location :',index)
else:
pass
## Sanity Check - Memory file is not empty, from a crash
# if existRefHash.strip() == '':
# memflag = False
# elif existIndexHash.strip() == '':
# memflag = False
# elif index.strip() == '':
# memflag = False
if varcount == 3:
memflag = True
else:
memflag = False
return memflag
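# Example of the memory-file layout parsed above (format follows the writer in the
# index-preparation step; hash and path values are placeholders):
#
#   @timestamp:05_17_11_42
#   @genomehash:9e107d9d372bb6826bd81d3542a419d6
#   @index:/path/to/index/reference.clean
#   @indexhash:e4d909c290d0fb1ca068ffaddf22cbd0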
def coreReserve(cores):
'''
Decides the core pool for the machine - written to make PHASIS compatible with machines that
have less than 10 cores - Will be improved in future
'''
if cores == 0:
## Automatic assignment of cores selected
totalcores = int(multiprocessing.cpu_count())
if totalcores == 4: ## For quad core system
nproc = 3
elif totalcores == 6: ## For hexa core system
nproc = 5
elif totalcores > 6 and totalcores <= 10: ## For octa core system and those with less than 10 cores
nproc = 7
else:
nproc = 10 #int(totalcores*0.9)
else:
## Reserve user specifed cores
nproc = int(cores)
return nproc
#### FASTA CLEAN P - IN DEV
def FASTAread(filename):
'''
Reads FASTA file to alist
'''
### Sanity check #####################
if not os.path.isfile(reference):
print("'%s' reference file not found" % (reference))
print("Please check the genomeFile - Is it in specified directory? Did you input wrong name?")
print("Script will exit for now\n")
sys.exit()
else:
print("Reference file located - Preparing to create index")
pass
#####################################
### Read sequence file ##############
fh_in = open(filename, 'r')
print ("phasdetect uses FASTA header as key for identifying the phased loci")
print ("Caching reference '%s' FASTA file" % (filename))
fasta = fh_in.read()
fasta_splt = fasta.split('>')
print("Cached FASTA file with %s entries" % (len(fasta_splt[1:])))
fh_in.close()
return fasta_splt[1:]
def FASTAclean(ent):
'''
Cleans one entry of FASTA file - multi-line fasta to single line, header clean, empty lines removal
'''
ent_splt = ent.split('\n')
aname = ent_splt[0].split()[0].strip()
# print("Cleaning - %s" % (aname))
if runType == 'G':
## To match with phasing-core script for genome version which removed non-numeric and preceding 0s
bname = re.sub("[^0-9]", "", aname).lstrip('0')
else:
bname = aname
bseq = ''.join(x.strip() for x in ent_splt[1:]) ## Sequence in multiple lines
return bname,bseq
def FASTAwrite(filename,alist,mode):
'''
Writes list of processed/cleaned FASTA
'''
## Write file
if mode == 0:
fastaclean = ('%s/%s.clean.fa' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0])) ## os.getcwd(),fastaclean.rpartition('/')[-1].rpartition('.')[0]
else:
print("Input correct mode- 0: Normal | 1: Seqeunces reversed | 2: Seqeunces reverse complemented | 3: Seqeunces complemented only")
print("USAGE: cleanFasta.v.x.x.py FASTAFILE MODE")
sys.exit()
### Outfiles
fh_out1 = open(fastaclean, 'w')
fastasumm = ('%s/%s.summ.txt' % (os.getcwd(),filename.rpartition('/')[-1].rpartition('.')[0]))
fh_out2 = open(fastasumm, 'w')
fh_out2.write("Name\tLen\n")
acount = 0 ## count the number of entries
empty_count = 0 ## count empty entries
for ent in alist:
aname,aseq = ent
alen = len(aseq)
if alen > 200:
fh_out1.write('>%s\n%s\n' % (aname,aseq))
fh_out2.write('%s\t%s\n' % (aname,alen))
acount+=1
else:
empty_count+=1
pass
fh_out1.close()
fh_out2.close()
print("Fasta file with reduced header: '%s' with total entries %s is prepared" % (fastaclean, acount))
print("There were %s entries with empty/short sequences,these were removed\n" % (empty_count))
return fastaclean,fastasumm
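# Illustrative composition of the in-development per-entry cleaner above (not part
# of the original pipeline); assumes the global `runType` is already set, since
# FASTAclean reads it:
def _example_fasta_clean_pipeline(reference_path):
    entries = FASTAread(reference_path)             # list of raw FASTA entries
    cleaned = [FASTAclean(ent) for ent in entries]  # list of (name, sequence) tuples
    return FASTAwrite(reference_path, cleaned, mode=0)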
#### DE-DUPLICATOR MODULES ####
def dedup_process(alib):
'''
To parallelize the process
'''
print("\n#### Fn: De-duplicater #######################")
afastaL = dedup_fastatolist(alib) ## Read
acounter = deduplicate(afastaL
#!/usr/bin/env python
"""lipid-martini-itp.py creates a customized Martini lipid topologies, use lipid-martini-itp.py -h for description"""
__author__ = "<NAME>, and <NAME>"
__status__ = "Development"
__version__ = "0.5"
__email__ = "<EMAIL>"
import sys,math
# Very simple option class
class Option:
def __init__(self,func=str,num=1,default=None,description=""):
self.func = func
self.num = num
self.value = default
self.description = description
def __nonzero__(self):
if self.func == bool:
return self.value != False
return bool(self.value)
def __str__(self):
return self.value and str(self.value) or ""
def setvalue(self,v):
if len(v) == 1:
self.value = self.func(v[0])
else:
self.value = [ self.func(i) for i in v ]
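# Minimal usage sketch of the Option holder above (illustrative only; the actual
# option table of this script is defined further below and may differ):
def _example_option_usage():
    alhead = Option(str, 1, "C P", "Lipid headgroup specification string")
    alhead.setvalue(["E P"])        # as it would be filled from command-line tokens
    return str(alhead).split()      # -> ['E', 'P']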
# Description
desc = """
This script creates a customized Martini lipid topology based on the head, linker and
tail specification strings provided. The topology follows the standard Martini 2.0 lipid
definitions; lipids with these topologies have been explored e.g. in:
-<NAME>, <NAME>, <NAME>.
Coarse grained model for semi-quantitative lipid simulations.
JPC-B, 108:750-760, 2004.
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
The MARTINI force field: coarse grained model for biomolecular simulations.
JPC-B, 111:7812-7824, 2007.
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Cholesterol shows preference for the interior of polyunsaturated lipid membranes.
JACS, 130:10-11, 2008.
-<NAME>, <NAME>.
The molecular face of lipid rafts in model membranes.
PNAS, 105:17367-17372, 2008.
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
The molecular mechanism of lipid monolayer collapse.
PNAS, 105:10803-10808, 2008.
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>.
Lipid organization of the plasma membrane.
JACS, 136:14554-14559, 2014.
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Martini force field parameters for glycolipids.
JCTC, 9:1694-1708, 2013.
Description of this script can be found in the following manuscript, please cite:
-<NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
Computational lipidomics with insane: a versatile tool for generating custom membranes for molecular simulations.
JCTC, 150410125128004, 2015.
WARNING:
This script can generate topologies for numerous lipids many of which are unrealistic
and untested, please use with discretion. E.g. none of the sphingomyelin lipids have been thoroughly tested.
The lipid descriptions supported are as follows:
Heads (-alhead):
Please provide a list of lipid head beads. The left most bead will be on top, they are
connected in a sequence from left to right and the right most bead is connected to the
first bead in the linker. Each bead is connected with a bond (R_b = 0.47 and K_b = 1250).
<Warning some charged Martini lipids have used a shorter bond, R_b = 0.37, but here all
lipids use the same R_b = 0.47 bond between headgroup beads.>
There are no angles between the different head beads, but there is an angle
[last head bead / first linker bead / first bead of first tail] Theta_a = 180 and
K_a = 25 that helps orient the head with respect to the rest of the lipid. If an empty
string is provided the lipid will have no head and starts with the linker beads. Spaces
should separate the different beads; extra spaces are ignored.
head bead types supported:
C = NC3 = Choline - bead Q0, charge +1
E = NH3 = Ethanolamine - bead Qd, charge +1
G = GL0 = Glycerol - bead P4, charge 0
S = CNO = Serine - bead P5, charge 0
P = PO4 = Phosphate - bead Qa, charge -1
O = PO4 = Phosphate - bead Qa, charge -2
Examples of lipid heads:
"C P" -> 'NC3 PO4' - PC - PhosphatidylCholine
"E P" -> 'NH3 PO4' - PE - PhosphatidylEthanolamine
"G P" -> 'GL0 PO4' - PG - PhosphatidylGlycerol
"S P" -> 'CNO PO4' - PS - PhosphatidylSerine
"P" -> 'PO4 ---' - PA - Phosphatidic acid (charge -1, use "O" for charge -2)
"O" -> 'PO4 ---' - PA - Phosphatidic acid, one bond, not protonated (charge -2)
"" -> '--- ---' - DG - No head, Diacyl Glycerols if x2 Gly linkers are used (DAG)
Current version also supports writing PI, PIP, PIP2 and PIP3 headgroups (called PI, P1,
P2 and P3). These headgroups are hardcoded based on Cesar Lopez's parameters; see Lopez
et al. 2013 JCTC.
Linkers (-allink):
Currently only lipids with a Glycerol linker or Sphingosine backbone are supported. Each
linker is connected with a bond R_b = 0.37 and K_b 1250. The number of linkers and tails
provided have to match and each linker is connected with its corresponding tail with a
bond R_b = 0.47 and K_b = 1250. Additionally if more than one linker is provided an angle
[last head bead / first linker bead / second linker bead] Theta_a = 120 and K_a = 25 is
added to support the head / linker / tail orientation.
The sphingosine linking (for ceramide and sphingomyelin lipids) is not tested and
should be used with care. Only works with x2 linker beads AM1 and AM2. They are defined
with the standard bond between them and linker angle to the head. Just remember that AM1
and AM2 contain parts of the lipid tail, and normally tail A (AM1 tail) should start
with a T bead (containing the initial trans double bond) and be shorter than normal.
linker beads types supported:
G = GLY = Glycerols - bead Na with tail but P1 without, charge 0
A = Sphingosine backbone - always x2 - AM1 with P1 bead and AM2 with P5 bead, charge 0
Examples of lipid linkers:
"G G" -> 'GLY GLY ---' - A glycerol linker
"A A" -> 'AM1 AM2' - A sphingosine backbone
Tails (-altail):
One lipid tail definition should be provided for each linker, separated with a space;
extra spaces are ignored. Each tail can have an arbitrary number of tail beads. Tails
are connected by bonds, first bead to the tail's linker bead and then from left to
right (R_b = 0.47 and K_b 1250). To fix the tails orientation/dynamics an angle is
added for each tail bead [tail bead - 1 or linker bead / tail bead / tail bead + 1]
the Theta_a and K_a depend on the tail definition.
tail bead types supported:
C = corresponding roughly to a linear combination of x4 CH2 groups (CH2-CH2-CH2-CH2).
Represented with a C1 bead. Angle [X / C / X] Theta_a = 180 and K_a = 25.
D = corresponding roughly to a linear combination of x2 CH2 and x2 CH groups
(CH2-CH=CH-CH2), where the double bound is in a cis bond. Represented with a
C3 bead, except if there is another D before or after then use a C4 bead. For
the angles the standard [X / D / X] is a Theta_a = 120 and K_a = 45, except if
the next bead is also D [X / D / D] then use Theta_a = 100 and K_a = 10.
T = Same as D except with a trans double bond. The angle [X / T / X] is set with
equilibrium bond angle Theta_a = 180 and K_a = 45. Represented with a C3 bead.
Examples of tails:
Lyso tails:
"- CCCC " - C16-18:0 Lyso
"- CCCCC " - C20-22:0 Lyso
"- CCCCCC " - C24-26:0 Lyso
Saturated tails:
"CC CC " - C08-10:0 - diOctanoyl or diDecanoyl
"CCC CCC " - C12-14:0 - diLauric acid or diMyristoyl
"CCCC CCCC " - C16-18:0 - diPalmitic acid or diStearoyl
"CCCCC CCCCC " - C20-22:0 - diArachidoyl or diBehenoyl
"CCCCCC CCCCCC" - C24-26:0 - diLignoceroyl or diHexacosanoyl
Unsaturated tails:
"CDC CDC " - C14:1(9c) - diMyristoleoyl
"CDCC CDCC " - C16:1(9c) - diPalmitoleoyl / C18:1(9c) - diOleoyl
"CDDC CDDC " - C18:2(9c,12c) - diLinoleoyl
"CCDCC CCDCC " - C20:1(11c) - diGondic acid / C22:1(11c) - diErucoyl
"DDDDC DDDDC " - C20:4(5c,8c,11c,14c) - diArachidonoyl
"DDDDDD DDDDDD" - C22:6(4c,7c,10c,13c,16c,19c) - diDocosahexaenoyl
"CCCDCC CCCDCC" - C24:1(15c) - diNervonoyl
Mixed tails:
"CCCC CCC " | |
WC("b", S(1)) * acoth(x_ * WC("c", S(1)))) ** WC("n", S(1))
/ (x_ * sqrt(d_ + x_ ** S(2) * WC("e", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons150,
cons1740,
)
rule6462 = ReplacementRule(pattern6462, replacement6462)
pattern6463 = Pattern(
Integral(
(WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1)))) ** WC("n", S(1))
/ (x_ ** S(2) * sqrt(d_ + x_ ** S(2) * WC("e", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons89,
cons90,
)
rule6463 = ReplacementRule(pattern6463, replacement6463)
pattern6464 = Pattern(
Integral(
(WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1)))) ** WC("n", S(1))
/ (x_ ** S(2) * sqrt(d_ + x_ ** S(2) * WC("e", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons89,
cons90,
)
rule6464 = ReplacementRule(pattern6464, replacement6464)
pattern6465 = Pattern(
Integral(
x_ ** m_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1))
/ sqrt(d_ + x_ ** S(2) * WC("e", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons95,
cons90,
cons96,
cons1512,
)
rule6465 = ReplacementRule(pattern6465, replacement6465)
pattern6466 = Pattern(
Integral(
x_ ** m_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1))
/ sqrt(d_ + x_ ** S(2) * WC("e", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons95,
cons90,
cons96,
cons1512,
)
rule6466 = ReplacementRule(pattern6466, replacement6466)
pattern6467 = Pattern(
Integral(
x_ ** m_
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons1786,
cons139,
cons168,
cons1154,
)
rule6467 = ReplacementRule(pattern6467, replacement6467)
pattern6468 = Pattern(
Integral(
x_ ** m_
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons1786,
cons139,
cons168,
cons1154,
)
rule6468 = ReplacementRule(pattern6468, replacement6468)
pattern6469 = Pattern(
Integral(
x_ ** m_
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons1786,
cons139,
cons269,
cons1154,
)
rule6469 = ReplacementRule(pattern6469, replacement6469)
pattern6470 = Pattern(
Integral(
x_ ** m_
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons1786,
cons139,
cons269,
cons1154,
)
rule6470 = ReplacementRule(pattern6470, replacement6470)
pattern6471 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons164,
cons139,
cons91,
cons321,
)
rule6471 = ReplacementRule(pattern6471, replacement6471)
pattern6472 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons164,
cons139,
cons91,
cons321,
)
rule6472 = ReplacementRule(pattern6472, replacement6472)
pattern6473 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons4,
cons1739,
cons64,
cons1787,
cons1742,
)
rule6473 = ReplacementRule(pattern6473, replacement6473)
pattern6474 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons4,
cons1739,
cons64,
cons1787,
cons1743,
)
rule6474 = ReplacementRule(pattern6474, replacement6474)
pattern6475 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons4,
cons1739,
cons64,
cons1787,
cons40,
)
rule6475 = ReplacementRule(pattern6475, replacement6475)
pattern6476 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** p_
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons4,
cons1739,
cons64,
cons1787,
cons149,
)
rule6476 = ReplacementRule(pattern6476, replacement6476)
pattern6477 = Pattern(
Integral(
x_
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons5,
cons56,
)
rule6477 = ReplacementRule(pattern6477, replacement6477)
pattern6478 = Pattern(
Integral(
x_
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons5,
cons56,
)
rule6478 = ReplacementRule(pattern6478, replacement6478)
pattern6479 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons5,
cons1788,
)
rule6479 = ReplacementRule(pattern6479, With6479)
pattern6480 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1)))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons5,
cons1788,
)
rule6480 = ReplacementRule(pattern6480, With6480)
pattern6481 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons40,
cons150,
cons1789,
)
rule6481 = ReplacementRule(pattern6481, replacement6481)
pattern6482 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons40,
cons150,
cons1789,
)
rule6482 = ReplacementRule(pattern6482, replacement6482)
pattern6483 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons5,
cons1790,
)
rule6483 = ReplacementRule(pattern6483, replacement6483)
pattern6484 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (a_ + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
* (d_ + x_ ** S(2) * WC("e", S(1))) ** WC("p", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons5,
cons1790,
)
rule6484 = ReplacementRule(pattern6484, replacement6484)
pattern6485 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons1499,
)
rule6485 = ReplacementRule(pattern6485, replacement6485)
pattern6486 = Pattern(
Integral(
x_ ** WC("m", S(1))
* (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1))
* (WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1))))
** WC("n", S(1)),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons19,
cons4,
cons5,
cons1499,
)
rule6486 = ReplacementRule(pattern6486, replacement6486)
pattern6487 = Pattern(
Integral(
(WC("a", S(0)) + WC("b", S(1)) * atanh(x_ * WC("c", S(1)))) ** WC("n", S(1))
* atanh(u_)
/ (d_ + x_ ** S(2) * WC("e", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons89,
cons90,
cons1912,
)
rule6487 = ReplacementRule(pattern6487, replacement6487)
pattern6488 = Pattern(
Integral(
(WC("a", S(0)) + WC("b", S(1)) * acoth(x_ * WC("c", S(1)))) ** WC("n", S(1))
* acoth(u_)
/ (d_ + x_ ** S(2) * WC("e", S(1))),
x_,
),
cons2,
cons3,
cons8,
cons29,
cons50,
cons1739,
cons89,
cons90,
cons1912,
)
rule6488 = ReplacementRule(pattern6488, replacement6488)
pattern6489 = Pattern(
Integral(
import numpy as np
import math
# from mxnet import nd
from mxnet.gluon import nn
class Augmentation(nn.HybridBlock):
def __init__(self, angle_range, zoom_range, translation_range, target_shape, orig_shape, batch_size,
aspect_range = None, relative_angle = 0, relative_scale = (1, 1), relative_translation = 0):
super().__init__()
self._angle_range = tuple(map(lambda x : x / 180 * math.pi, angle_range) )
self._scale_range = zoom_range
try:
translation_range = tuple(translation_range)
if len(translation_range) != 2:
raise ValueError('expect translation range to have shape [2,], but got {}'.format(translation_range))
except TypeError:
translation_range = (-translation_range, translation_range)
self._translation_range = tuple(map(lambda x : x * 2, translation_range))
self._target_shape = np.array(target_shape)
self._orig_shape = np.array(orig_shape)
self._batch_size = batch_size
self._unit = np.flip(self._target_shape - 1, axis=0).reshape([2,1]) / np.flip(self._orig_shape - 1, axis=0).reshape([1,2])
self._relative_scale = relative_scale
self._relative_angle = tuple(map(lambda x : x / 180 * math.pi * relative_angle, angle_range) )
self._relative_translation = (-relative_translation * 2, relative_translation * 2)
self._aspect_range = aspect_range
def _get_relative_transform(self, F):
aspect_ratio = (self._target_shape[0] - 1) / (self._target_shape[1] - 1)
rotation = F.random.uniform(*self._relative_angle, shape=(self._batch_size))
scale = F.random.uniform(*self._relative_scale, shape=(self._batch_size))
affine_params = [scale * rotation.cos(), scale * -rotation.sin() * aspect_ratio, F.zeros_like(scale),
scale * rotation.sin() / aspect_ratio, scale * rotation.cos(), F.zeros_like(scale),
F.zeros_like(scale), F.zeros_like(scale), F.ones_like(scale)]
affine = F.reshape(F.stack(*affine_params, axis=1), [0, 3, 3])
return affine
def hybrid_forward(self, F, img1, img2):
rotation = F.random.uniform(*self._angle_range, shape=(self._batch_size))
scale = F.random.uniform(*self._scale_range, shape=(self._batch_size))
if self._aspect_range is not None:
aspect_ratio = F.random.uniform(*self._aspect_range, shape=(self._batch_size))
else:
aspect_ratio = 1
pad_x, pad_y = 1 - scale * self._unit[0, 0], 1 - scale * self._unit[1, 1]
translation_x = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_x + F.random.uniform(*self._translation_range, shape=(self._batch_size))
translation_y = F.random.uniform(-1, 1, shape=(self._batch_size,)) * pad_y + F.random.uniform(*self._translation_range, shape=(self._batch_size))
affine_params = [scale * aspect_ratio * rotation.cos() * self._unit[0, 0], scale * aspect_ratio * -rotation.sin() * self._unit[1, 0], translation_x,
scale * rotation.sin() * self._unit[0, 1], scale * rotation.cos() * self._unit[1, 1], translation_y]
affine_params = F.stack(*affine_params, axis=1)
rel_affine = self._get_relative_transform(F)
affine_2 = F.reshape(F.batch_dot(F.reshape(affine_params, [0, 2, 3]), rel_affine), [0, 6])
rel_translation = [F.zeros((self._batch_size,)), F.zeros((self._batch_size,)), F.random.uniform(*self._relative_translation, shape=(self._batch_size,)),
F.zeros((self._batch_size,)), F.zeros((self._batch_size,)), F.random.uniform(*self._relative_translation, shape=(self._batch_size))]
rel_translation = F.stack(*rel_translation, axis = 1)
affine_2 = affine_2 + rel_translation
grid = F.GridGenerator(data=affine_params, transform_type='affine', target_shape=list(self._target_shape))
img1 = F.BilinearSampler(data=img1, grid=grid)
grid_2 = F.GridGenerator(data=affine_2, transform_type='affine', target_shape=list(self._target_shape))
img2 = F.BilinearSampler(data=img2, grid=grid_2)
return img1, img2
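# Illustrative usage of the geometric Augmentation block above (not from the
# original training script; all ranges and shapes are made-up values):
def _example_geometry_augmentation():
    from mxnet import nd
    aug = Augmentation(angle_range=(-17, 17), zoom_range=(0.9, 1.6),
                       translation_range=0.1, target_shape=(320, 448),
                       orig_shape=(384, 512), batch_size=4,
                       relative_angle=0.1, relative_scale=(0.95, 1.05),
                       relative_translation=0.05)
    img1 = nd.random.uniform(shape=(4, 3, 384, 512))
    img2 = nd.random.uniform(shape=(4, 3, 384, 512))
    return aug(img1, img2)   # pair of jointly warped NCHW batches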
'''
class ChromaticBrightnessAugmentation(nn.HybridBlock):
def __init__(self, brightness = 0.5, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = 1.0 + F.random.uniform(-self.brightness, self.brightness, shape = (self.batch_size, 1, 1, 1))
aug = F.broadcast_mul(aug, alpha)
return aug
class ChromaticContrastAugmentation(nn.HybridBlock):
def __init__(self, contrast = 0.5, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.contrast = contrast
self.coefficient = [0.299, 0.587, 0.114]
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = 1.0 + F.random.uniform(-self.contrast, self.contrast, shape = (self.batch_size, 1, 1, 1))
gray = F.concat(*[img.slice_axis(axis = 1, begin = k, end = k + 1) * self.coefficient[k] for k in range(3)], dim = 1)
mean = F.mean(gray, keepdims = True, axis = (1, 2, 3))
gray = 3.0 * (1.0 - alpha) * mean
aug = F.broadcast_mul(aug, alpha)
aug = F.broadcast_add(aug, gray)
return aug
'''
class ChromaticSHAugmentation(nn.HybridBlock):
def __init__(self, saturation = 0.5, hue = 0.5, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.saturation = saturation
self.hue = hue
self.matrix_yiq = [ [ 0.299, 0.587, 0.114],
[ 0.596, -0.274, -0.321],
[ 0.211, -0.523, -0.311]]
self.matrix_rgb = [ [ 1. , 0.956, 0.621],
[ 1. , -0.272, -0.647],
[ 1. , -1.107, 1.705]]
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = 1.0 + F.random.uniform(-self.saturation, self.saturation, shape = (self.batch_size, 1, 1, 1))
theta = F.random.uniform(-self.hue * np.pi, self.hue * np.pi, shape = (self.batch_size, 1, 1, 1))
su = alpha * F.cos(theta)
sw = alpha * F.sin(theta)
matrix = [ [0.299 + 0.701 * su + 0.168 * sw, 0.587 - 0.587 * su + 0.330 * sw, 0.114 - 0.114 * su - 0.497 * sw],
[0.299 - 0.299 * su - 0.328 * sw, 0.587 + 0.413 * su + 0.035 * sw, 0.114 - 0.114 * su + 0.292 * sw],
[0.299 - 0.300 * su + 1.250 * sw, 0.587 - 0.588 * su - 1.050 * sw, 0.114 + 0.886 * su - 0.203 * sw]]
aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
return aug
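# Illustrative usage of the saturation/hue block above (not from the original
# training code); assumes NCHW float images in [0, 1]:
def _example_sh_augmentation():
    from mxnet import nd
    aug = ChromaticSHAugmentation(saturation=0.5, hue=0.5, batch_size=2)
    imgs = nd.random.uniform(shape=(2, 3, 64, 64))
    return aug(imgs)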
'''
class ChromaticGammaAugmentation(nn.HybridBlock):
def __init__(self, gamma = (0.7, 1.5), batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.gamma_min, self.gamma_max = gamma
self.batch_size = batch_size
def hybrid_forward(self, F, img):
aug = img
alpha = F.random.uniform(self.gamma_min, self.gamma_max, shape = (self.batch_size, 1, 1, 1))
aug = F.broadcast_power(aug, alpha)
return aug
class ChromaticEigenAugmentation(nn.HybridBlock):
def __init__(self, batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.batch_size = batch_size
def hybrid_forward(self, F, img):
spin_angle = F.random.uniform(low = -np.pi, high = np.pi, shape = (self.batch_size, 3, 1, 1))
cos_ = [F.cos(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
sin_ = [F.sin(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
spin_matrix = [ [ cos_[0] * cos_[1], sin_[1] * cos_[2] + sin_[0] * cos_[1] * sin_[2], sin_[1] * sin_[2] - sin_[0] * cos_[1] * cos_[2]],
[- cos_[0] * sin_[1], cos_[1] * cos_[2] - sin_[0] * sin_[1] * sin_[2], cos_[1] * sin_[2] + sin_[0] * sin_[1] * cos_[2]],
[ sin_[0] , - cos_[0] * sin_[2] , cos_[0] * cos_[2] ]]
aug = F.concat(*[sum([F.broadcast_mul(img.slice_axis(axis = 1, begin = j, end = j + 1), spin_matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
return aug
class ChromaticComposeAugmentation(nn.Block):
def __init__(self, brightness = 0.2, contrast = 0.5, saturation = 0.5, hue = 0.5, gamma = (0.7, 1.5), batch_size = 1, **kwargs):
super().__init__(**kwargs)
self.brightness = brightness
self.contrast = contrast
self.saturation = saturation
self.hue = hue
self.gamma = gamma
self.batch_size = batch_size
self.aug_brightness = ChromaticBrightnessAugmentation(self.brightness, self.batch_size)
self.aug_contrast = ChromaticContrastAugmentation(self.contrast, self.batch_size)
self.aug_sh = ChromaticSHAugmentation(self.saturation, self.hue, self.batch_size)
self.augs = [self.aug_brightness, self.aug_contrast, self.aug_sh]
self.Gamma = ChromaticGammaAugmentation(self.gamma, self.batch_size)
def forward(self, img1, img2):
aug = nd.concat(img1, img2, dim = 2)
augs = random.sample(self.augs, 3)
for aug_op in augs:
aug = aug_op(aug)
aug = aug.clip(0, 1)
aug = self.Gamma(aug)
return nd.split(aug, axis = 2, num_outputs = 2)
'''
class ColorAugmentation(nn.HybridBlock):
def __init__(self, contrast_range, brightness_sigma, channel_range, batch_size, shape, noise_range,
saturation, hue, gamma_range = None, eigen_aug = False, **kwargs):
super().__init__(**kwargs)
self._contrast_range = contrast_range
self._brightness_sigma = brightness_sigma
self._channel_range = channel_range
self._batch_size = batch_size
self._shape = shape
self._noise_range = noise_range
self._gamma_range = gamma_range
self._eigen_aug = eigen_aug
self._saturation = saturation
self._hue = hue
def hybrid_forward(self, F, img1, img2):
contrast = F.random.uniform(*self._contrast_range, shape=(self._batch_size, 1, 1, 1)) + 1
brightness = F.random.normal(scale=self._brightness_sigma, shape=(self._batch_size, 1, 1, 1))
channel = F.random.uniform(*self._channel_range, shape=(self._batch_size, 3, 1, 1))
noise_sigma = F.random.uniform(*self._noise_range)
if self._gamma_range is not None:
gamma = F.random.uniform(*self._gamma_range, shape = (self._batch_size, 1, 1, 1))
contrast = contrast.repeat(repeats=3, axis=1)
brightness = brightness.repeat(repeats=3, axis=1)
alpha = 1.0 + F.random.uniform(-self._saturation, self._saturation, shape = (self._batch_size, 1, 1, 1))
theta = F.random.uniform(-self._hue * np.pi, self._hue * np.pi, shape = (self._batch_size, 1, 1, 1))
su = alpha * F.cos(theta)
sw = alpha * F.sin(theta)
sh_matrix = [ [0.299 + 0.701 * su + 0.168 * sw, 0.587 - 0.587 * su + 0.330 * sw, 0.114 - 0.114 * su - 0.497 * sw],
[0.299 - 0.299 * su - 0.328 * sw, 0.587 + 0.413 * su + 0.035 * sw, 0.114 - 0.114 * su + 0.292 * sw],
[0.299 - 0.300 * su + 1.250 * sw, 0.587 - 0.588 * su - 1.050 * sw, 0.114 + 0.886 * su - 0.203 * sw]]
if self._eigen_aug:
spin_angle = F.random.uniform(low = -np.pi, high = np.pi, shape = (self._batch_size, 3, 1, 1))
cos_ = [F.cos(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
sin_ = [F.sin(spin_angle).slice_axis(axis = 1, begin = k, end = k + 1) for k in range(3)]
spin_matrix = [ [ cos_[0] * cos_[1], sin_[1] * cos_[2] + sin_[0] * cos_[1] * sin_[2], sin_[1] * sin_[2] - sin_[0] * cos_[1] * cos_[2]],
[-cos_[0] * sin_[1], cos_[1] * cos_[2] - sin_[0] * sin_[1] * sin_[2], cos_[1] * sin_[2] + sin_[0] * sin_[1] * cos_[2]],
[ sin_[0] ,-cos_[0] * sin_[2] , cos_[0] * cos_[2] ]]
ret = []
for img in (img1, img2):
aug = img
aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), sh_matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
noise = F.random.normal(scale=1, shape=(self._batch_size, 3) + tuple(self._shape))
aug = aug + noise * noise_sigma
mean = F.mean(aug, keepdims=True, axis=(2,3))
aug = F.broadcast_minus(aug, mean)
aug = F.broadcast_mul(aug, contrast * channel)
if self._eigen_aug:
aug = F.concat(*[sum([F.broadcast_mul(aug.slice_axis(axis = 1, begin = j, end = j + 1), spin_matrix[i][j]) for j in range(3)]) for i in range(3)], dim = 1)
aug = F.broadcast_add(aug, mean * channel + brightness)
aug = F.clip(aug, 0, 1)
if self._gamma_range is not None:
aug = F.broadcast_power(aug, F.exp(gamma))
ret.append(aug)
return ret
class GeometryAugmentation(nn.HybridBlock):
def __init__(self, angle_range, zoom_range, translation_range, target_shape, orig_shape, batch_size,
aspect_range = None, relative_angle=None, relative_scale=None, relative_translation=None):
super().__init__()
self._angle_range = tuple(map(lambda x : x / 180 * math.pi, angle_range) )
self._scale_range = zoom_range
try:
translation_range = tuple(translation_range)
if len(translation_range) != 2:
raise ValueError('expect translation range to have shape [2,], but got {}'.format(translation_range))
except TypeError:
translation_range = (-translation_range, translation_range)
self._translation_range = tuple(map(lambda x : x * 2, translation_range))
self._target_shape = np.array(target_shape)
self._orig_shape = np.array(orig_shape)
self._batch_size = batch_size
self._unit
from collections import defaultdict
import numpy as np
import torch
from pytorch_points.network.operations import faiss_knn, dot_product, batch_svd, ball_query, group_knn
from pytorch_points.utils.pytorch_utils import save_grad, linear_loss_weight
from pytorch_points.network.model_loss import nndistance, labeled_nndistance
from pytorch_points.network.geo_operations import (compute_face_normals_and_areas, dihedral_angle,
CotLaplacian, UniformLaplacian, batch_normals)
from pytorch_points.network.model_loss import (MeshLaplacianLoss, PointEdgeLengthLoss, \
MeshStretchLoss, PointStretchLoss, PointLaplacianLoss,
SimpleMeshRepulsionLoss, MeshEdgeLengthLoss,
NormalLoss)
from pytorch_points.misc import logger
class AllLosses(torch.nn.Module):
def __init__(self, opt):
super().__init__()
self.opt = opt
self.loss = defaultdict(float)
self.labeled_chamfer_loss = LabeledChamferDistance(beta=opt.beta, gamma=opt.gamma, delta=opt.delta)
self.cage_shortLength_loss = SimpleMeshRepulsionLoss(0.02, reduction="mean", consistent_topology=True)
self.cage_faceAngle_loss = MeshDihedralAngleLoss(threshold=np.pi/30)
self.mvc_reg_loss = MVCRegularizer(threshold=50, beta=1.0, alpha=0.0)
self.cage_laplacian = MeshLaplacianLoss(torch.nn.L1Loss(reduction="mean"), use_cot=False, use_norm=True,
consistent_topology=True, precompute_L=True)
self.cage_smooth_loss = MeshSmoothLoss(torch.nn.MSELoss(reduction="mean"), use_cot=False, use_norm=True)
self.grounding_loss = GroundingLoss(up_dim=(1 if "SHAPENET" in opt.dataset else 2))
if opt.sym_plane is not None:
self.symmetry_loss = SymmetryLoss(sym_plane=opt.sym_plane, NCHW=False).cuda()
# mesh_chamfer_loss = losses.InterpolatedCDTriMesh(interpolate_n=5, beta=1.0, gamma=0.0, delta=1/30)
# cage_inside_loss = InsideLoss3DTriMesh(reduction="max")
# cage_inside_loss = ExtPointToNearestFaceDistance(reduction="mean", min_dist=opt.cinside_eps)
if self.opt.dataset in ("SURREAL", "FAUST"):
logger.info("Using GTNormal loss")
self.shape_normal_loss = GTNormalLoss()
else:
logger.info("Using KNN for normal loss")
self.shape_normal_loss = NormalLoss(reduction="none", nn_size=16)
self.shape_fnormal_loss = FaceNormalLoss(n_faces=300)
self.stretch_loss = PointStretchLoss((4 if opt.dim==3 else 2), reduction="mean")
self.edge_loss = PointEdgeLengthLoss((4 if opt.dim==3 else 2), torch.nn.MSELoss(reduction="mean"))
if self.opt.regular_sampling or (not opt.mesh_data):
logger.info("Using point laplacian loss")
self.shape_laplacian = PointLaplacianLoss(16, torch.nn.MSELoss(reduction="none"), use_norm=opt.slap_norm)
else:
logger.info("Using mesh laplacian loss")
self.shape_laplacian = MeshLaplacianLoss(torch.nn.MSELoss(reduction="none"), use_cot=True,
use_norm=True, consistent_topology=True, precompute_L=True)
self.p2f_loss = LocalFeatureLoss(16, torch.nn.MSELoss(reduction="none"))
def forward(self, all_inputs, all_outputs, progress=1.0):
self.loss.clear()
B = all_outputs["new_cage"].shape[0]
# ======== cage deformation back and forth ============= #
if self.opt.loss == "LCD":
loss, idx12, idx21 = self.labeled_chamfer_loss(all_outputs["deformed"],
all_inputs["target_shape"],
all_inputs["source_label"],
all_inputs["target_label"])
self.idx12 = idx12.to(dtype=torch.int64)
self.idx21 = idx21.to(dtype=torch.int64)
self.loss["LCD"] += loss*self.opt.loss_weight
# S-to-S use MSE
dist = torch.sum((all_outputs["deformed"][self.opt.batch_size*2:, :, :] - all_inputs["target_shape"][self.opt.batch_size*2:,:,:])**2, dim=-1)
self.loss["MSE"] += dist.mean()*self.opt.loss_weight
elif self.opt.loss == "CD":
loss, idx12, idx21 = self.labeled_chamfer_loss(all_outputs["deformed"],
all_inputs["target_shape"])
self.loss["CD"] = loss
self.loss["CD"] *= self.opt.loss_weight
self.idx12 = idx12.to(dtype=torch.int64)
self.idx21 = idx21.to(dtype=torch.int64)
# S-to-S use MSE
dist = torch.sum((all_outputs["deformed"][self.opt.batch_size*2:, :, :] - all_inputs["target_shape"][self.opt.batch_size*2:,:,:])**2, dim=-1)
self.loss["MSE"] += dist.mean()*self.opt.loss_weight
elif self.opt.loss == "MSE":
dist = torch.sum((all_outputs["deformed"] - all_inputs["target_shape"])**2, dim=-1)
self.loss["MSE"] += dist.mean()
self.loss["MSE"] += torch.max(dist, dim=1)[0].mean()
self.loss["MSE"] *= self.opt.loss_weight
# self.loss["MSE"] += torch.sum((all_outputs["t_deformed"] - all_inputs["source_shape"])**2, dim=-1).mean()
# ======== cage surface close to the source shape ============= #
if self.opt.cshape_weight > 0:
ref_surface = all_inputs["source_shape"]+0.1*all_inputs["source_normals"]
loss, _, _ = self.labeled_chamfer_loss(all_outputs["cage"], ref_surface)
self.loss["CSHAPE"] += loss
self.loss["CSHAPE"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.cshape_weight, 0)
# ======== cage center must be close to shape center ========== #
if self.opt.gravity_weight > 0:
cage_shift = torch.mean(all_outputs["cage"], dim=1) - torch.mean(all_inputs["source_shape"], dim=1)
self.loss["GRAV"] += torch.mean(torch.nn.functional.softshrink(torch.sum(cage_shift**2, dim=-1), lambd=0.1))
# cage_shift = torch.mean(all_outputs["new_cage"], dim=1) - torch.mean(all_inputs["target_shape"], dim=1)
# self.loss["GRAV"] += torch.mean(torch.nn.functional.softshrink(torch.sum(cage_shift**2, dim=-1), lambd=0.1))
self.loss["GRAV"] *= self.opt.gravity_weight
# ======== penalize large unnormalized weight and/or negative weights ========== #
if self.opt.mvc_weight > 0:
self.loss["WREG"] += self.mvc_reg_loss(all_outputs["weight"]) * self.opt.mvc_weight
# ======== feature preservation via point to surface ======== #
if self.opt.p2f_weight > 0:
self.loss["P2F"] = torch.mean(self.p2f_loss(all_inputs["source_shape"], all_outputs["deformed"]))
self.loss["P2F"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.p2f_weight, self.opt.p2f_weight/10)
# ======== feature preservation via laplacian ========== #
if self.opt.slap_weight > 0:
# reduction none (B,P)
slap1 = torch.mean(
self.shape_laplacian(all_inputs["source_shape"], all_outputs["deformed"], face=all_inputs["source_face"]).view(B,-1),
dim=-1, keepdim=True)
# use idx12 to get the closest points on the target and compute the laplacian at those points
if self.opt.blend_style and hasattr(self, "idx21"):
slap1 *= (1-all_inputs["alpha"])
# slap2 = 0.5*torch.mean(self.shape_laplacian(all_outputs["deformed"], all_inputs["target_shape"], idx12=self.idx12), dim=-1)
slap2 = torch.mean(
self.shape_laplacian(all_outputs["deformed"], all_inputs["target_shape"], idx12=self.idx12).view(B,-1),
dim=-1, keepdim=True)
slap2 *= all_inputs["alpha"]
self.loss["SLAP"] += slap2.mean()
self.loss["SLAP"] += slap1.mean()
self.loss["SLAP"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.slap_weight, self.opt.slap_weight/10)
# ======== feature preservation via normal ================= #
if self.opt.snormal_weight > 0:
snormal1 = torch.mean(
self.shape_normal_loss(all_inputs["source_shape"], all_outputs["deformed"]), dim=-1, keepdim=True)
if self.opt.blend_style and hasattr(self, "idx21"):
snormal1 *= (1-all_inputs["alpha"])
# snormal2 = 0.5*torch.mean(self.shape_normal_loss(all_inputs["deformed"], all_inputs["target_shape"], idx=self.idx12), dim=-1)
snormal2 = torch.mean(
self.shape_normal_loss(all_outputs["deformed"], all_inputs["target_shape"], idx12=self.idx12),
dim=-1, keepdim=True)
snormal2 *= all_inputs["alpha"]
self.loss["SNORMAL"] += snormal2.mean()
self.loss["SNORMAL"] += snormal1.mean()
self.loss["SNORMAL"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.snormal_weight, self.opt.snormal_weight/10)
# ======== enforce symmetry on cage ========== #
if self.opt.sym_weight > 0:
self.loss["SYM"] += self.symmetry_loss(all_outputs["deformed"])
self.loss["SYM"] += self.symmetry_loss(all_outputs["cage"])
self.loss["SYM"] *= self.opt.sym_weight
# ======== enforce to stay on the ground ========== #
if self.opt.ground_weight > 0:
self.loss["GROUND"] += self.grounding_loss(all_inputs["source_shape"], all_outputs["deformed"])
self.loss["GROUND"] *= self.opt.ground_weight
# ======== cage face angle should be larger than pi/6 ========== #
if self.opt.cfangle_weight > 0:
# self.loss["CFANGLE"] += self.cage_faceAngle_loss(all_outputs["cage"], edge_points=all_inputs["cage_edge_points"])
self.loss["CFANGLE"] += self.cage_faceAngle_loss(all_outputs["new_cage"], edge_points=all_inputs["cage_edge_points"])
self.loss["CFANGLE"] *= self.opt.cfangle_weight
# ======== cage face angle should be larger than pi/6 ========== #
if self.opt.csmooth_weight > 0:
# self.loss["CSMOOTH"] += self.cage_smooth_loss(all_outputs["cage"], face=all_outputs["cage_face"])
self.loss["CSMOOTH"] += self.cage_smooth_loss(all_outputs["new_cage"], face=all_outputs["cage_face"])
self.loss["CSMOOTH"] *= self.opt.csmooth_weight
# ======== penalize cage with very short edges ================= #
if self.opt.cshort_weight > 0:
# TODO add cage_edges to all_inputs
self.loss["CEDGE"] = self.cage_shortLength_loss(all_outputs["cage"], edges=all_inputs["cage_edges"])
# self.loss["CEDGE"] = self.cage_shortLength_loss(all_outputs["t_cage"], edges=all_inputs["cage_edges"])
self.loss["CEDGE"] *= self.opt.cshort_weight
# ======== require new cage similar to cage ================= #
if self.opt.clap_weight > 0:
self.loss["CLAP"] += self.cage_laplacian(all_outputs["cage"].expand(B,-1,-1).contiguous().detach(),
all_outputs["new_cage"].contiguous(), face=all_outputs["cage_face"])
self.loss["CLAP"] *= self.opt.clap_weight
# ======== penalize increasing point distance ================= #
if self.opt.sstretch_weight > 0:
self.loss["SSTRETCH"] += self.stretch_loss(all_outputs["source_shape"], all_outputs["deformed"])*self.opt.sstretch_weight
# ======== penalize knn distance change ================= #
if self.opt.sedge_weight > 0:
self.loss["SEDGE"] += self.edge_loss(all_outputs["source_shape"], all_outputs["deformed"])
self.loss["SEDGE"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.sedge_weight, self.opt.sedge_weight/10)
if self.opt.sfnormal_weight > 0:
# randomly compare a subset of face normals
self.loss["SFNORMAL"] += self.shape_fnormal_loss(all_inputs["target_mesh"], all_outputs["deformed_hr"],
all_inputs["source_face"].expand(B,-1,-1))
self.loss["SFNORMAL"] *= linear_loss_weight(self.opt.nepochs, progress, self.opt.sfnormal_weight, self.opt.sfnormal_weight/10)
return self.loss
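# Illustrative sketch (not part of the original module): AllLosses.forward returns a
# dict of already-weighted loss terms, so a training loop would typically reduce it to
# a single scalar before calling backward. `loss_dict` below is assumed to be that dict.
def _sum_weighted_losses(loss_dict):
    """Sum the weighted terms returned by AllLosses.forward into one scalar."""
    return sum(loss_dict.values())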
class FaceNormalLoss(torch.nn.Module):
def __init__(self, n_faces=100):
super().__init__()
self.n_faces= n_faces
self.cos = torch.nn.CosineSimilarity(dim=-1, eps=1e-08)
def forward(self, ref_mesh_V, mesh_V, mesh_F):
B, F, _ = mesh_F.shape
face_sample_idx = torch.randint(min(self.n_faces, F), (B, self.n_faces, 1), dtype=torch.int64).to(device=mesh_F.device)
sampled_F = torch.gather(mesh_F, 1, face_sample_idx.expand(-1,-1,3))
ref_normals,_ = compute_face_normals_and_areas(ref_mesh_V, sampled_F)
normals,_ = compute_face_normals_and_areas(mesh_V, sampled_F)
cos = self.cos(ref_normals, normals)
return torch.mean(1-cos)
class GroundingLoss(torch.nn.Module):
def __init__(self, up_dim=1):
super().__init__()
self.up_dim = up_dim # if z is the up direction, the up_dim = 2
# previous ground stays on the ground
def forward(self, source, deformed):
"""
source: (B,N,3)
deformed: (B,N,3)
"""
eps = 1e-2
ground_level = torch.min(source[:,:,self.up_dim], dim=1)[0]
ground_point_mask = (source[:,:,self.up_dim] - ground_level.unsqueeze(-1)).abs() < eps
source_ground_level = torch.masked_select(source[:,:,self.up_dim], ground_point_mask)
deformed_ground_level = torch.masked_select(deformed[:,:,self.up_dim], ground_point_mask)
return torch.mean(torch.abs(source_ground_level - deformed_ground_level))
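# Minimal usage sketch for GroundingLoss (assumptions: the up axis is y, i.e. up_dim=1,
# and the point clouds are (B, N, 3) torch tensors; the shapes below are arbitrary).
def _grounding_loss_example():
    source = torch.rand(2, 1024, 3)
    deformed = source + 0.01 * torch.randn_like(source)
    return GroundingLoss(up_dim=1)(source, deformed)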
# class SymmetricPointFaceDistance(nn.Module):
# def forward(self, input, input_normals, target, target_normals):
class ExtPointToNearestFaceDistance(torch.nn.Module):
"""
for every exterior point, return the squared distance to the closest face
"""
def __init__(self, min_dist=0.1, reduction="mean"):
super().__init__()
self.min_dist = min_dist
self.reduction = reduction
def forward(self, mesh_V, mesh_F, points, exterior_flag, mesh_FN=None):
"""
mesh_V (B,N,3)
mesh_F (B,F,3)
mesh_FN (B,F,3)
points (B,P,3)
exterior_flag (B,P,1)
"""
if mesh_FN is None:
mesh_FN, _ = compute_face_normals_and_areas(mesh_V, mesh_F)
mesh_FN = mesh_FN.detach()
else:
mesh_FN = mesh_FN.detach()
B, F, _ = mesh_F.shape
_, N, D = mesh_V.shape
_, P, D = points.shape
# (B,N,D) (B,F,3) -> (B,F,3,3) face points
face_points = torch.gather(mesh_V.unsqueeze(1).expand(-1,F,-1,-1), 2, mesh_F.unsqueeze(-1).expand(-1,-1,-1,3))
# (B,F,3)
face_center = torch.mean(face_points, dim=-2)
# (B,P,F,3)
point_to_face_center = points.unsqueeze(2) - face_center.unsqueeze(1)
# point to face distance (B,P,F,3)
point_to_face_signed_dist = (dot_product(point_to_face_center, mesh_FN.unsqueeze(1), dim=-1, keepdim=True)+self.min_dist)
point_to_face_v = point_to_face_signed_dist * mesh_FN.unsqueeze(1)
# (B,P,F)
point_to_face_sqdist = torch.sum(point_to_face_v*point_to_face_v, dim=-1)
# ignore faces outside the points
point_to_face_sqdist.masked_fill_(point_to_face_signed_dist.squeeze(-1)<0, 1e10)
# (B,P)
point_to_face_sqdist, _ = torch.min(point_to_face_sqdist, dim=-1)
# ignore interior points
inside_flag = (~exterior_flag.view(B,P))| torch.all(point_to_face_signed_dist.view(B,P,F)<0, dim=-1)
point_to_face_sqdist.masked_fill_(inside_flag, 0)
if self.reduction == "mean":
point_to_face_sqdist = torch.mean(point_to_face_sqdist.view(B,-1), dim=1)
elif self.reduction == "max":
point_to_face_sqdist = torch.max(point_to_face_sqdist.view(B,-1), dim=1)[0]
elif self.reduction == "sum":
point_to_face_sqdist = torch.sum(point_to_face_sqdist.view(B,-1), dim=1)
elif self.reduction == "none":
pass
else:
raise NotImplementedError
point_to_face_sqdist = torch.mean(point_to_face_sqdist, dim=0)
return point_to_face_sqdist
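# Usage sketch for ExtPointToNearestFaceDistance (assumptions: compute_face_normals_and_areas
# from pytorch_points accepts (B,N,3) vertices and (B,F,3) int64 faces; exterior_flag marks
# points lying outside the mesh; all shapes below are arbitrary illustration values).
def _ext_point_to_face_example():
    mesh_V = torch.rand(1, 8, 3)
    mesh_F = torch.randint(0, 8, (1, 12, 3))
    points = torch.rand(1, 100, 3) * 2.0 - 0.5
    exterior_flag = torch.ones(1, 100, 1, dtype=torch.bool)
    return ExtPointToNearestFaceDistance(min_dist=0.05, reduction="mean")(mesh_V, mesh_F, points, exterior_flag)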
class MVCRegularizer(torch.nn.Module):
"""
penalize MVC with large absolute value and negative values
alpha * mean(log(relu(|weight| - threshold) + 1)) + beta * mean(relu(-weight)^2)
"""
def __init__(self, alpha=1.0, beta=1.0, threshold=5.0):
super().__init__()
self.alpha = alpha
self.beta = beta
self.threshold = threshold
def forward(self, weights):
# only penalize weights whose magnitude exceeds the threshold
# B, N, F, _ = loss.shape
loss = 0
if self.alpha > 0:
large_loss = torch.log(torch.nn.functional.relu(weights.abs()-self.threshold)+1)
# large_loss = large_loss ** 2
loss += (torch.mean(large_loss)) * self.alpha
if self.beta > 0:
neg_loss = torch.nn.functional.relu(-weights)
neg_loss = neg_loss ** 2
loss += (torch.mean(neg_loss)) * self.beta
return loss
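# Usage sketch for MVCRegularizer (illustrative): mean-value-coordinate weights with
# magnitude above `threshold` incur a log penalty, negative weights a squared penalty.
# The (B, N, n_cage_vertices) layout below is a hypothetical weight shape.
def _mvc_regularizer_example():
    weights = 10.0 * torch.randn(2, 1024, 32)
    return MVCRegularizer(alpha=1.0, beta=1.0, threshold=5.0)(weights)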
class LabeledChamferDistance(torch.nn.Module):
"""
Learning to Sample, Dovrat et al.
loss = mean_{xyz1}(nd_{1to2}) + beta * max_{xyz1}(nd_{1to2}) + (gamma + delta * |xyz1|) * mean_{xyz2}(nd_{2to1})
===
:param:
xyz1: generated points
xyz2: reference points
"""
def __init__(self, beta=1.0, gamma=1, delta=0):
super().__init__()
self.beta = beta
self.gamma = gamma
self.delta = delta
def forward(self, xyz1, xyz2, label1=None, label2=None):
P = xyz1.shape[1]
if label1 is not None and label2 is not None:
dist12, dist21, idx12, idx21 = labeled_nndistance(xyz1, xyz2, label1, label2)
else:
dist12, dist21, idx12, idx21 = nndistance(xyz1, xyz2)
# dist12: for each generated point, the distance to its nearest reference point; dist21: the reverse
loss = torch.mean(dist12, dim=-1) + torch.max(dist12, dim=-1)[0]*self.beta + (self.gamma+self.delta*P)*(torch.mean(dist21, dim=-1))
loss = torch.mean(loss)
return loss, idx12, idx21
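# Usage sketch for LabeledChamferDistance (assumption: the nndistance op from
# pytorch_points is a CUDA kernel, so the point clouds are moved to the GPU;
# shapes are (B, N, 3) generated vs. reference clouds).
def _labeled_chamfer_example():
    xyz1 = torch.rand(2, 2048, 3).cuda()
    xyz2 = torch.rand(2, 2048, 3).cuda()
    loss, idx12, idx21 = LabeledChamferDistance(beta=1.0, gamma=1, delta=0)(xyz1, xyz2)
    return loss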
class SymmetryLoss(torch.nn.Module):
"""
symmetry loss
chamfer(mirrored(xyz), xyz)
self.k.vehicle.get_position(veh_id)
dist = edge_len - relative_pos
return dist
def _convert_edge(self, edges):
"""Convert the string edge to a number.
Start at the bottom-left vertical edge and go right and then up, so the
bottom-left vertical edge is zero and the edge beside it on the right is 1.
The numbers are assigned along the lowest column, then the lowest row,
then the second lowest column, etc. Left goes before right, top goes
before bottom.
The values are zero indexed.
Parameters
----------
edges : list of str or str
name of the edge(s)
Returns
-------
list of int or int
a number uniquely identifying each edge
"""
if isinstance(edges, list):
return [self._split_edge(edge) for edge in edges]
else:
return self._split_edge(edges)
def _split_edge(self, edge):
"""Act as utility function for convert_edge."""
if edge:
if edge[0] == ":": # center
center_index = int(edge.split("center")[1][0])
base = ((self.cols + 1) * self.rows * 2) \
+ ((self.rows + 1) * self.cols * 2)
return base + center_index + 1
else:
pattern = re.compile(r"[a-zA-Z]+")
edge_type = pattern.match(edge).group()
edge = edge.split(edge_type)[1].split('_')
row_index, col_index = [int(x) for x in edge]
if edge_type in ['bot', 'top']:
rows_below = 2 * (self.cols + 1) * row_index
cols_below = 2 * (self.cols * (row_index + 1))
edge_num = rows_below + cols_below + 2 * col_index + 1
return edge_num if edge_type == 'bot' else edge_num + 1
if edge_type in ['left', 'right']:
rows_below = 2 * (self.cols + 1) * row_index
cols_below = 2 * (self.cols * row_index)
edge_num = rows_below + cols_below + 2 * col_index + 1
return edge_num if edge_type == 'left' else edge_num + 1
else:
return 0
def _get_relative_node(self, agent_id, direction):
"""Return the node number of the traffic light agent in a given direction.
For example, the nodes in a traffic light grid with 2 rows and 3
columns are indexed as follows:
| | |
--- 3 --- 4 --- 5 ---
| | |
--- 0 --- 1 --- 2 ---
| | |
See flow.networks.traffic_light_grid for more information.
Example of function usage:
- Seeking the "top" direction to ":center0" would return 3.
- Seeking the "bottom" direction to ":center0" would return -1.
Parameters
----------
agent_id : str
agent id of the form ":center#"
direction : str
top, bottom, left, right
Returns
-------
int
node number
"""
ID_IDX = 1
agent_id_num = int(agent_id.split("center")[ID_IDX])
if direction == "top":
node = agent_id_num + self.cols
if node >= self.cols * self.rows:
node = -1
elif direction == "bottom":
node = agent_id_num - self.cols
if node < 0:
node = -1
elif direction == "left":
if agent_id_num % self.cols == 0:
node = -1
else:
node = agent_id_num - 1
elif direction == "right":
if agent_id_num % self.cols == self.cols - 1:
node = -1
else:
node = agent_id_num + 1
else:
raise NotImplementedError
return node
def additional_command(self):
"""See parent class.
Used to insert vehicles that are on the exit edge and place them
back on their entrance edge.
"""
for veh_id in self.k.vehicle.get_ids():
self._reroute_if_final_edge(veh_id)
def _reroute_if_final_edge(self, veh_id):
"""Reroute vehicle associated with veh_id.
Check whether the vehicle's current edge is a final edge. If it is, remove
the vehicle and reintroduce it at the start of the route it should start off at.
"""
edge = self.k.vehicle.get_edge(veh_id)
if edge == "":
return
if edge[0] == ":": # center edge
return
pattern = re.compile(r"[a-zA-Z]+")
edge_type = pattern.match(edge).group()
edge = edge.split(edge_type)[1].split('_')
row_index, col_index = [int(x) for x in edge]
# find the route that we're going to place the vehicle on if we are
# going to remove it
route_id = None
if edge_type == 'bot' and col_index == self.cols:
route_id = "bot{}_0".format(row_index)
elif edge_type == 'top' and col_index == 0:
route_id = "top{}_{}".format(row_index, self.cols)
elif edge_type == 'left' and row_index == 0:
route_id = "left{}_{}".format(self.rows, col_index)
elif edge_type == 'right' and row_index == self.rows:
route_id = "right0_{}".format(col_index)
if route_id is not None:
type_id = self.k.vehicle.get_type(veh_id)
lane_index = self.k.vehicle.get_lane(veh_id)
# remove the vehicle
self.k.vehicle.remove(veh_id)
# reintroduce it at the start of the network
self.k.vehicle.add(
veh_id=veh_id,
edge=route_id,
type_id=str(type_id),
lane=str(lane_index),
pos="0",
speed="max")
def get_closest_to_intersection(self, edges, num_closest, padding=False):
"""Return the IDs of the vehicles that are closest to an intersection.
For each edge in edges, return the IDs (veh_id) of the num_closest
vehicles in edge that are closest to an intersection (the intersection
they are heading towards).
This function performs no check on whether the edges actually lead
towards an intersection; it simply returns the vehicles that are
closest to the end of their edges.
If there are fewer than num_closest vehicles on an edge, the function
pads the result with empty strings "" in place of vehicle ids if
the padding parameter is set to True.
Parameters
----------
edges : str | str list
ID of an edge or list of edge IDs.
num_closest : int (> 0)
Number of vehicles to consider on each edge.
padding : bool (default False)
If there are fewer than num_closest vehicles on an edge, pad the
result with empty strings "" in place of vehicle ids (note: leaving
padding as False while passing a list of several edges can lead to
information loss, since you will not know which edge, if any,
contains fewer than num_closest vehicles).
Usage
-----
For example, consider the following network, composed of 4 edges
whose ids are "edge0", "edge1", "edge2" and "edge3", the numbers
being vehicles all headed towards intersection x. The ID of the vehicle
with number n is "veh{n}" (e.g. "veh0", "veh1"...).
edge1
| |
| 7 |
| 8 |
-------------| |-------------
edge0 1 2 3 4 5 6 x edge2
-------------| |-------------
| 9 |
| 10|
| 11|
edge3
And consider the following example calls on the previous network:
>>> get_closest_to_intersection("edge0", 4)
["veh6", "veh5", "veh4", "veh3"]
>>> get_closest_to_intersection("edge0", 8)
["veh6", "veh5", "veh4", "veh3", "veh2", "veh1"]
>>> get_closest_to_intersection("edge0", 8, padding=True)
["veh6", "veh5", "veh4", "veh3", "veh2", "veh1", "", ""]
>>> get_closest_to_intersection(["edge0", "edge1", "edge2", "edge3"],
3, padding=True)
["veh6", "veh5", "veh4", "veh8", "veh7", "", "", "", "", "veh9",
"veh10", "veh11"]
Returns
-------
str list
If n is the number of edges given as parameters, then the returned
list contains n * num_closest vehicle IDs.
Raises
------
ValueError
if num_closest <= 0
"""
if num_closest <= 0:
raise ValueError("Function get_closest_to_intersection called with "
"parameter num_closest={}, but num_closest should "
"be positive".format(num_closest))
if isinstance(edges, list):
ids = [self.get_closest_to_intersection(edge, num_closest)
for edge in edges]
# flatten the list and return it
return [veh_id for sublist in ids for veh_id in sublist]
# get the ids of all the vehicles on the edge 'edges' ordered by
# increasing distance to end of edge (intersection)
veh_ids_ordered = sorted(self.k.vehicle.get_ids_by_edge(edges),
key=self.get_distance_to_intersection)
# return the ids of the num_closest vehicles closest to the
# intersection, potentially with ""-padding.
pad_lst = [""] * (num_closest - len(veh_ids_ordered))
return veh_ids_ordered[:num_closest] + (pad_lst if padding else [])
class TrafficLightGridPOEnv(TrafficLightGridEnv):
"""Environment used to train traffic lights.
Required from env_params:
* switch_time: minimum switch time for each traffic light (in seconds).
Earlier RL commands are ignored.
* num_observed: number of vehicles nearest each intersection that is
observed in the state space; defaults to 2
States
An observation is the number of observed vehicles in each intersection
closest to the traffic lights, a number uniquely identifying which
edge the vehicle is on, and the speed of the vehicle.
Actions
The action space consist of a list of float variables ranging from 0-1
specifying whether a traffic light is supposed to switch or not. The
actions are sent to the traffic light in the grid from left to right
and then top to bottom.
Rewards
The reward is the delay of each vehicle minus a penalty for switching
traffic lights
Termination
A rollout is terminated once the time horizon is reached.
Additional
Vehicles are rerouted to the start of their original routes once they
import math
from collections import Counter, defaultdict
from datetime import datetime
from typing import List
import networkx as nx
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import pyproj
from numpy import mean, nanmean
from cognite.power.data_classes import PowerAssetList
# unified plotting colors
_MARKER_EDGE_COLOR = "rgb(85,150,210)"
_MARKER_FILL_COLOR = "rgb(230,230,230)"
# universal transverse mercator zone 32 = southern Norway, Germany
_LATLON_PROJ = "+proj=utm +zone=32, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs"
_PROJECTION = pyproj.Proj(_LATLON_PROJ, preserve_units=True)
def _latlon_to_xy(lat, lon):
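    # NOTE: currently an identity pass-through; the UTM projection defined in
    # _PROJECTION above is not applied here.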
(x, y) = (lat, lon)
return (x, y)
def voltage_color(base_voltage: float):
color_map = [
(-1e9, "000000"),
(100, "000000"),
(132, "9ACA3C"),
(300, "20B3DE"),
(420, "ED1C24"),
(1e9, "ED1C24"),
]
color_map = [(v, tuple(int(h[i : i + 2], 16) for i in (0, 2, 4))) for v, h in color_map] # to rgb
ix_above = 0
while color_map[ix_above][0] < base_voltage:
ix_above += 1
t = (base_voltage - color_map[ix_above - 1][0]) / (color_map[ix_above][0] - color_map[ix_above - 1][0])
color = [
int(color_map[ix_above - 1][1][rgb] + t * (color_map[ix_above][1][rgb] - color_map[ix_above - 1][1][rgb]))
for rgb in range(3)
]
c = ",".join(map(str, color))
return f"rgb({c})"
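# Illustrative check of the interpolation above (not part of the original module):
# voltage_color(132) gives the pure 132 kV green "rgb(154,202,60)" and voltage_color(300)
# the pure 300 kV blue "rgb(32,179,222)"; intermediate voltages such as 216 kV are
# linearly interpolated in RGB between their surrounding anchors.
def _voltage_color_examples():
    return [voltage_color(v) for v in (132.0, 216.0, 300.0)]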
def _flow_color(flow: float):
return voltage_color(base_voltage=flow)
def node_locations(power_area, interpolate_missing_positions=True):
node_loc = {
name: [
float(substation.metadata.get("PositionPoint.xPosition", math.nan)),
float(substation.metadata.get("PositionPoint.yPosition", math.nan)),
]
for name, substation in power_area._graph.nodes(data="object")
}
if interpolate_missing_positions:
orphan_count = 0
for it in range(2):
for s, loc in node_loc.items():
if math.isnan(loc[0]):
nb_locs = [
node_loc[n] for n in nx.neighbors(power_area._graph, s) if not math.isnan(node_loc[n][0])
]
mean_loc = [sum(c) / len(nb_locs) for c in zip(*nb_locs)]
if len(mean_loc) == 2:
node_loc[s] = mean_loc
elif it == 1:
node_loc[s] = [20, 55 + orphan_count] # TODO don't hardcode this
orphan_count += 1
return node_loc
def node_layout(power_area, position):
if position == "source":
node_positions = node_locations(power_area)
elif position == "project":
node_positions = {n: _latlon_to_xy(*xy) for n, xy in node_locations(power_area).items()}
elif position == "spring":
node_positions = nx.spring_layout(power_area._graph)
elif position == "kamada":
node_positions = nx.kamada_kawai_layout(power_area._graph)
else:
raise ValueError(f"Unknown layout {position}")
return node_positions
def create_substation_plot(node_locations, node_plot_mode):
text, x, y = zip(*[(k, v[0], v[1]) for k, v in node_locations.items()])
return go.Scatter(
x=x,
y=y,
text=text,
mode=node_plot_mode,
textposition="top center",
hoverinfo="text",
marker=dict(size=15, line=dict(color=_MARKER_EDGE_COLOR, width=2), color=_MARKER_FILL_COLOR),
)
def create_substation_map_plot(node_locations):
text, lon, lat = zip(*[(k, v[0], v[1]) for k, v in node_locations.items()])
# to get an edge color we plot the same data twice with difference marker size
plots = [
go.Scattermapbox(lat=lat, lon=lon, showlegend=False, marker=dict(size=17, color=_MARKER_EDGE_COLOR),),
go.Scattermapbox(
lat=lat,
lon=lon,
text=text,
mode="markers",
showlegend=False,
hoverinfo="text",
marker=dict(size=13, color=_MARKER_FILL_COLOR),
textposition="top center",
),
]
return plots
def edge_locations(power_area, node_locations):
# there is a gotcha here that having 100s of line plots is resource intensive, so making one for each
# ac line segment causes computers to catch fire. To get the coloring right we create one for each
# base voltage value, and then we split the line by adding nans. This makes the function unintuitive.
networkx_edges = power_area._graph.edges(data=True)
lons = defaultdict(list)
lats = defaultdict(list)
center_lons = defaultdict(list)
center_lats = defaultdict(list)
text = defaultdict(list)
counter = Counter([(edge[0], edge[1]) for edge in list(power_area._graph.edges(data=True))])
dups = {key: 1 for key in counter if counter[key] + counter[key[::-1]] == 2} # TODO: handle 3?
for acls in networkx_edges:
lon, lat = zip(*[node_locations[s] for s in acls[:2]])
center_lat = mean(lat)
center_lon = mean(lon)
if (acls[0], acls[1]) in dups:
# probably there are more elegant ways, but we want to offset the center in cases where there are multiple
# lines between two substations
lat_len = abs(lat[1] - lat[0])
lon_len = abs(lon[1] - lon[0])
edge_length = math.sqrt((lat_len) ** 2 + (lon_len) ** 2)
center_lat += 0.005 * dups[(acls[0], acls[1])] * lon_len / edge_length
center_lon += 0.005 * dups[(acls[0], acls[1])] * lat_len / edge_length
dups[(acls[0], acls[1])] *= -1
base_voltage = acls[2]["object"].metadata.get("BaseVoltage_nominalVoltage", "0")
lats[base_voltage] += [lat[0], center_lat, lat[1], math.nan]
lons[base_voltage] += [lon[0], center_lon, lon[1], math.nan]
center_lons[base_voltage].append(center_lon)
center_lats[base_voltage].append(center_lat)
text[base_voltage].append("{}: {} kV".format(acls[2]["object"].name, base_voltage))
return lats, lons, center_lats, center_lons, text
def create_line_segment_plot(x, y, center_x, center_y, text):
line_plots = [
go.Scatter(
x=x[base_voltage],
y=y[base_voltage],
line=dict(width=2, color=voltage_color(float(base_voltage)), shape="spline", smoothing=1.3),
hoverinfo="none",
mode="lines",
)
for base_voltage in x.keys()
]
center_plots = [
go.Scatter(
x=center_x[base_voltage],
y=center_y[base_voltage],
text=text[base_voltage],
mode="markers",
hoverinfo="text",
marker=dict(size=0.0001, color=voltage_color(float(base_voltage))),
)
for base_voltage in text.keys()
]
return line_plots, center_plots
def create_line_segment_map_plot(lats, lons, center_lats, center_lons, text):
line_plots = [
go.Scattermapbox(
mode="lines",
lon=lons[base_voltage],
lat=lats[base_voltage],
hoverinfo="none",
showlegend=False,
line=dict(color=voltage_color(float(base_voltage)), width=6),
)
for base_voltage in lats.keys()
]
center_plots = [
go.Scattermapbox(
lat=center_lats[base_voltage],
lon=center_lons[base_voltage],
text=text[base_voltage],
mode="markers",
showlegend=False,
hoverinfo="text",
marker=dict(size=0.0001, color=voltage_color(float(base_voltage))),
)
for base_voltage in text.keys()
]
return line_plots + center_plots
def _np_datetime_to_ms(np_datetime):
return np_datetime.astype("datetime64[ms]").astype("uint64")
class PowerPlot:
@staticmethod
def draw_with_map(power_area, height=None):
# plot substations
node_locs = node_locations(power_area, interpolate_missing_positions=False)
substation_plots = create_substation_map_plot(node_locs)
# plot ac line segments
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_locs)
ac_line_segment_plots = create_line_segment_map_plot(lats, lons, center_lats, center_lons, text)
center = nanmean([v for v in node_locs.values()], axis=0)
fig = go.Figure(
# ordering matters here: substations last so they are drawn on top
data=ac_line_segment_plots + substation_plots,
layout=go.Layout(
hovermode="closest",
mapbox_style="stamen-terrain",
margin={"r": 0, "t": 0, "l": 0, "b": 0},
height=height,
mapbox=dict(zoom=7, center=dict(lon=center[0], lat=center[1])),
),
)
return fig
@staticmethod
def draw(power_area, labels="fixed", position="kamada", height=None):
node_positions = node_layout(power_area, position)
node_plot_mode = "markers"
if labels == "fixed":
node_plot_mode += "+text"
# plot substations
substation_plot = create_substation_plot(node_positions, node_plot_mode)
# plot ac line segments
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_positions)
ac_line_segment_plots, ac_line_label_point_plot = create_line_segment_plot(
lons, lats, center_lons, center_lats, text
)
fig = go.Figure(
data=ac_line_segment_plots + ac_line_label_point_plot + [substation_plot],
layout=go.Layout(
height=height,
plot_bgcolor="rgb(250,250,250)",
titlefont_size=16,
showlegend=False,
hovermode="closest",
margin=dict(b=0, l=0, r=0, t=0),
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False, constrain="domain"),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False, scaleanchor="x"),
),
)
return fig
@staticmethod
def draw_flow(
power_area,
labels="fixed",
position="kamada",
height=None,
timeseries_type="estimated_value",
granularity="1h",
date: "np.datetime64" = None,
):
"""
Draws power flow through the area.
Args:
labels,position,height: as in `draw`
timeseries_type: type of time series to retrieve, i.e. value/estimated_value.
granularity: time step at which to average values over, as in the Python SDK `retrieve_dataframe` function.
date: datetime object at which to visualize flow; use None for the current time.
"""
node_plot_mode = "markers"
if labels == "fixed":
node_plot_mode += "+text"
node_positions = node_layout(power_area, position)
substation_plot = create_substation_plot(node_positions, node_plot_mode)
lats, lons, center_lats, center_lons, text = edge_locations(power_area, node_positions)
ac_line_segment_plots, ac_line_label_point_plot = create_line_segment_plot(
lons, lats, center_lons, center_lats, text
)
terminals = PowerAssetList(
list(set(sum([list(data["terminals"].values()) for f, t, data in power_area._graph.edges(data=True)], []))),
cognite_client=power_area._cognite_client,
)
ts = terminals.time_series(measurement_type="ThreePhaseActivePower", timeseries_type=timeseries_type)
analogs = power_area._cognite_client.assets.retrieve_multiple(ids=[t.asset_id for t in ts])
terminal_ids: List[int] = [a.parent_id for a in analogs]
target_time = np.datetime64(date or datetime.now())
delta = np.timedelta64(5, "D")
start = _np_datetime_to_ms((target_time - delta))
end = _np_datetime_to_ms((target_time + delta))
df = power_area._cognite_client.datapoints.retrieve_dataframe(
id=[t.id for t in ts],
aggregates=["average"],
granularity=granularity,
start=start, # TODO: split data prep and update
end=end,
include_aggregate_name=False,
)
df.columns = terminal_ids
ix = np.searchsorted(df.index, target_time, side="left")
flow_values = df.iloc[ix - 1, :]
title = f"flow at {df.index[ix - 1]}"
distances = [
np.linalg.norm(np.array(node_positions[edge[0]]) - np.array(node_positions[edge[1]]))
for edge in power_area._graph.edges
]
global_arrow_scale = 0.15 * np.mean(distances) # TODO: what is reasonable here?
arrow_traces = []
for f, t, data in power_area._graph.edges(data=True):
terminal_map = data["terminals"]
terminals = [terminal_map[f], terminal_map[t]]
flow_values_t = []
for side in [0, 1]:
val = np.nan
if terminals[side].id in flow_values.index:
val = flow_values[terminals[side].id]
if isinstance(val, pd.Series):
val = val.dropna()
val = val.mean() if not val.empty else np.nan
flow_values_t.append(val)
from_pos = np.array(node_positions[f])
to_pos = np.array(node_positions[t])
from_to_vec = to_pos - from_pos
distance = np.linalg.norm(from_to_vec)
arrow_scale = min(global_arrow_scale, 0.3 * distance)
from_to_vec /= max(distance, 0.1)
if flow_values_t[0] < flow_values_t[1]:
flow_vec = -from_to_vec
else:
flow_vec = from_to_vec
orthogonal = np.array([-flow_vec[1], flow_vec[0]])
mid = (from_pos + to_pos) / 2
sign_from = math.copysign(1, flow_values_t[0]) if not np.isnan(flow_values_t[0]) else 0
arrow_from_mid = mid - 0.5 * arrow_scale * from_to_vec # arrow middle is always closer to from
# direction of arrow depends on sign of flow
arrow_from_tail = arrow_from_mid - 0.33 * arrow_scale * flow_vec * sign_from
arrow_from_head = arrow_from_mid + 0.33 * arrow_scale * flow_vec * sign_from
arrow_from_left = arrow_from_tail - orthogonal * global_arrow_scale * 0.5
arrow_from_right = arrow_from_tail + orthogonal * global_arrow_scale * 0.5
sign_to = math.copysign(1, flow_values_t[1]) if not np.isnan(flow_values_t[1]) else 0
arrow_to_mid = mid + 0.5 * arrow_scale * from_to_vec # arrow middle is always closer to to
# direction of arrow depends on sign of flow
arrow_to_tail = arrow_to_mid - 0.33 * arrow_scale * flow_vec * (-sign_to)
arrow_to_head = arrow_to_mid + 0.33 * arrow_scale * flow_vec * (-sign_to)
arrow_to_left
""" library to take autodiff and execute a computation graph """
from __future__ import absolute_import
from .BatchNorm import Batch_NormalizationOp
import numpy as np
from scipy.sparse import spmatrix, coo_matrix
from .. import ndarray
from .._base import DNNL_LIB
from ..cpu_links import array_set as cpu_array_set
from .Variable import PlaceholderOp # add for optimizer
from ..dataloader import DataloaderOp, GNNDataLoaderOp
from .AllReduceCommunicate import AllReduceCommunicateOp
from .ParameterServerCommunicate import ParameterServerCommunicateOp, ParameterServerSparsePullOp, parameterServerSparsePull_op
from .AddElewise import add_op
from .DataTransfer import DataH2DOp, DataD2HOp, DataD2HSparseOp
from .EmbeddingLookUp import EmbeddingLookUp, EmbeddingLookUp_Gradient
from ..optimizer import OptimizerOp
from . import OnesLike
from ..stream import create_stream_handle, Event
from ..context import get_current_context, get_launch_config_by_traverse_nodes, assign_context_by_traverse_nodes, DeviceGroup
from .PipelineSend import PipelineSendOp
from .PipelineReceive import PipelineReceiveOp
from .Dropout import DropoutOp
from .LayerNorm import Layer_NormalizationOp
from .OnesLike import OnesLikeOp
from operator import add
from functools import reduce
import ctypes
import os
from time import time
def path_to_lib(name):
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../../build/lib/')
return os.path.join(lib_path, name)
def wrapped_mpi_nccl_init(init_nccl=True, devices=None):
from ..communicator.mpi_nccl_comm import mpi_communicator
global mpi_comm
global nccl_comm
if 'mpi_comm' not in globals():
mpi_comm = mpi_communicator(devices=devices)
if 'nccl_comm' not in globals():
nccl_comm = mpi_comm.ncclInit() if init_nccl else None
return nccl_comm
def new_group_comm(devices_context=None):
assert 'mpi_comm' in globals()
global mpi_comm
if devices_context is None:
comm = mpi_comm.ncclInit()
else:
comm = mpi_comm.ncclGroupInit(devices_context)
return comm
def get_nccl_communicate():
global nccl_comm
return nccl_comm
def get_worker_communicate():
global ps_comm
return ps_comm
def worker_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
def worker_finish():
ps_comm.Finalize()
def server_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
ps_comm.StartServer()
def server_finish():
ps_comm.Finalize()
def scheduler_init():
global ps_comm
ll = ctypes.cdll.LoadLibrary
ps_comm = ll(path_to_lib("libps.so"))
ps_comm.Init()
def scheduler_finish():
ps_comm.Finalize()
class HetuConfig(object):
__slots__ = [
'eval_node_list',
'train_name',
'val_name',
'context',
'seed',
'np_rand',
'comm_mode',
'node_strategy',
'context_launch',
'ps_comm',
'nccl_comm',
'local_rank',
'rank',
'nrank',
'p2p_stream',
'comp_stream',
'nccl_stream',
'h2d_stream',
'd2h_stream',
'h2d_ops',
'd2h_ops',
'ps_map',
'infer_ps_map',
'dataloader_ops',
'use_sparse_pull',
'cstable_policy',
'inference',
'enable_lazy',
'bsp',
'prefetch',
'cache_bound',
'log_path',
'my_eval_nodes',
'param_allreduce_group',
'placeholder_to_arr_map',
'gpipe'
]
def __init__(
self,
eval_node_list,
train_name,
val_name,
ctx=None,
seed=None,
comm_mode=None,
use_sparse_pull=True,
cstable_policy=None,
bsp=False,
prefetch=True,
enable_lazy=True,
cache_bound=100,
log_path=None,
gpipe=False,
):
'''
ctx: default device context
comm_mode: communication mode, should be one of the following
None -> Single GPU
PS -> Parameter Server
AllReduce -> MPI AllReduce
Hybrid -> Parameter Server for Sparse Parameter and MPI AllReduce for Dense Parameter
'''
self.gpipe = gpipe
self.eval_node_list = eval_node_list
self.train_name = train_name
self.val_name = val_name
# check context
if ctx is None:
ctx = get_current_context()
assert ctx, 'Default context should be determined.'
self.comm_mode = comm_mode
self.node_strategy = {}
local_gpu_devices = None
context_launch = isinstance(ctx, DeviceGroup)
self.context_launch = context_launch
if context_launch:
# with context usage
launchMPI, launchPS, self.node_strategy, devices = get_launch_config_by_traverse_nodes(
eval_node_list, ctx)
local_gpu_devices = sorted(
[dev.device_id for dev in devices if dev.local and ndarray.is_gpu_ctx(dev)])
if not launchMPI and not launchPS:
self.comm_mode = None
elif launchMPI and not launchPS:
self.comm_mode = 'AllReduce'
elif not launchMPI and launchPS:
self.comm_mode = 'PS'
else:
self.comm_mode = 'Hybrid'
# in pipeline or model parallel we have to initialize another p2p stream
init_p2p_stream = len(devices) != len(ctx)
# variables initialization
self.seed = seed if seed else np.int64(time())
self.np_rand = np.random.RandomState(self.seed)
# get attribute of communication mode
self.ps_comm = None
self.nccl_comm = None
self.local_rank = None
self.rank = None
self.nrank = None
ps_nrank = None
if self.comm_mode == 'PS' or self.comm_mode == 'Hybrid':
worker_init()
self.ps_comm = get_worker_communicate()
ps_rank = int(self.ps_comm.rank())
ps_nrank = int(
os.environ['DMLC_NUM_WORKER']) if 'DMLC_NUM_WORKER' in os.environ else 1
if self.comm_mode == "Hybrid" or self.comm_mode == "AllReduce":
self.nccl_comm = wrapped_mpi_nccl_init(devices=local_gpu_devices)
elif context_launch:
self.nccl_comm = wrapped_mpi_nccl_init(
init_nccl=init_p2p_stream, devices=local_gpu_devices)
if self.nccl_comm is not None:
self.local_rank = self.nccl_comm.local_rank
device_id = self.nccl_comm.dev_id
self.rank = self.nccl_comm.rank
self.nrank = self.nccl_comm.nrank
if ps_nrank:
assert ps_nrank == self.nrank
elif self.comm_mode == 'PS':
self.rank = ps_rank
self.nrank = ps_nrank
if context_launch:
global mpi_comm
self.local_rank = mpi_comm.local_rank
device_id = mpi_comm.dev_id
self.my_eval_nodes = eval_node_list
self.p2p_stream = None
self.param_allreduce_group = {}
if context_launch:
# comm_mode is None <=> only 1 model parallel instance
self.context = ndarray.gpu(device_id)
self.p2p_stream = create_stream_handle(
self.context) if init_p2p_stream else None
self.my_eval_nodes, trainable_params, has_send_recv = assign_context_by_traverse_nodes(
eval_node_list, self.context, self.nccl_comm, self.p2p_stream)
if (self.comm_mode == "Hybrid" or self.comm_mode == "AllReduce") and has_send_recv:
# here we need to use group communicator to implement allreduce,
# since not all processes use the same group
groups = set([n.raw_ctx for n in trainable_params])
temp_group_comms = {}
for group in groups:
temp_group_comms[group] = new_group_comm(group)
self.param_allreduce_group = {
n: temp_group_comms[n.raw_ctx] for n in trainable_params}
else:
self.context = ctx
on_gpu = ndarray.is_gpu_ctx(self.context)
self.nccl_stream = None
if self.comm_mode == "Hybrid" or self.comm_mode == "AllReduce":
if on_gpu:
self.nccl_stream = create_stream_handle(self.context)
self.nccl_comm = get_nccl_communicate()
# define streams
self.comp_stream = create_stream_handle(
self.context) if on_gpu else None
self.h2d_stream = create_stream_handle(
self.context) if on_gpu else None
self.d2h_stream = create_stream_handle(
self.context) if on_gpu else None
self.use_sparse_pull = use_sparse_pull if self.comm_mode == 'PS' or self.comm_mode == "Hybrid" else False
self.cstable_policy = cstable_policy if self.comm_mode == 'PS' or self.comm_mode == "Hybrid" else None
self.prefetch = prefetch if self.comm_mode == 'PS' or self.comm_mode == 'Hybrid' else False
if self.cstable_policy is not None:
self.cstable_policy = self.cstable_policy.upper()
self.use_sparse_pull = False
self.h2d_ops = {}
self.d2h_ops = {}
self.ps_map = {}
self.infer_ps_map = {}
self.enable_lazy = False and enable_lazy # now we don't use lazy
self.bsp = bsp
self.cache_bound = int(cache_bound)
self.log_path = log_path
if log_path is not None and (self.comm_mode == 'PS' or self.comm_mode == "Hybrid"):
assert os.path.isdir(
log_path), 'Need to specify a work directory to save logs.'
self.ps_comm.startRecord(ctypes.c_char_p(bytes(log_path, 'utf-8')))
self.placeholder_to_arr_map = dict()
topo_sort_with_hook(self.my_eval_nodes, self)
class Executor(object):
"""Executor computes values for given set of nodes in computation graph."""
def __init__(self, eval_node_dict, config=None, **kargs):
"""
Parameters
----------
eval_node_dict: dict of list of nodes whose values need to be computed.
"""
if not isinstance(eval_node_dict, dict):
eval_node_dict = {'default': eval_node_dict}
train_name, val_name = None, None
for k, v in eval_node_dict.items():
if any([isinstance(node, OptimizerOp) for node in v]):
# get the last subexecutor containing optimizer as train for ps op
train_name = k
else:
# get the last subexecutor not containing optimizer as val for ps op
val_name = k
all_eval_nodes = list(set(reduce(add, eval_node_dict.values())))
if config is None:
config = HetuConfig(eval_node_list=all_eval_nodes,
train_name=train_name, val_name=val_name, **kargs)
assert isinstance(
config, HetuConfig), 'Config type %s invalid.' % str(type(config))
self.eval_node_dict = eval_node_dict
self.config = config
if config.gpipe:
self.subexecutor = {k: SubExecutor4Gpipe(k, v, config) for k, v in eval_node_dict.items()}
else:
self.subexecutor = {k: SubExecutor(k, v, config) for k, v in eval_node_dict.items()}
self.topo_order = find_topo_sort(config.my_eval_nodes)
self.param_nodes = [node for node in self.topo_order if isinstance(
node, PlaceholderOp) and node.trainable]
self.comm_mode = self.config.comm_mode
self.ps_comm = self.config.ps_comm
self.local_rank = self.config.local_rank
self.rank = self.config.rank
def run(self, name='default', eval_node_list={}, feed_dict={}, convert_to_numpy_ret_vals=False):
return self.subexecutor[name].run(eval_node_list, feed_dict, convert_to_numpy_ret_vals)
@property
def batch_num(self):
assert len(
self.subexecutor) == 1, 'Batch num should be used with only 1 subexecutor.'
return list(self.subexecutor.values())[0].batch_num
def get_batch_num(self, name='default'):
return self.subexecutor[name].batch_num
def save(self, file_path):
assert os.path.isdir(
file_path), 'Need to specify a work directory to save parameters.'
if self.comm_mode in (None, 'AllReduce'):
# when using allreduce, users need to specify the worker whose rank equals 0 to save
for node in self.param_nodes:
np.save(os.path.join(file_path, node.name + '.npy'),
self.config.placeholder_to_arr_map[node].asnumpy())
else:
self.ps_comm.BarrierWorker()
if self.config.rank == 0:
for node in self.param_nodes:
if node.is_embed or self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.SaveParam(
nodeid, ctypes.c_char_p(bytes(file_path, 'utf-8')))
self.ps_comm.Wait(nodeid)
else:
np.save(os.path.join(file_path, node.name + '.npy'),
self.config.placeholder_to_arr_map[node].asnumpy())
self.ps_comm.BarrierWorker()
def load(self, file_path):
assert os.path.isdir(
file_path), 'Need to specify a work directory to load parameters.'
if self.comm_mode in (None, 'AllReduce'):
for node in self.param_nodes:
self.config.placeholder_to_arr_map[node][:] = np.load(
os.path.join(file_path, node.name + '.npy'))
else:
self.ps_comm.BarrierWorker()
if self.config.rank == 0:
for node in self.param_nodes:
if node.is_embed or self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.LoadParam(
nodeid, ctypes.c_char_p(bytes(file_path, 'utf-8')))
node.event.update()
self.ps_comm.BarrierWorker()
for node in self.topo_order:
if isinstance(node, PlaceholderOp) and node.trainable and not node.is_embed:
if self.comm_mode == 'PS':
node.event.sync()
nodeid = ctypes.c_int(node.id)
self.ps_comm.Pull(
nodeid, self.config.ps_map[node].handle)
node.event.update()
else:
self.config.placeholder_to_arr_map[node][:] = np.load(
os.path.join(file_path, node.name + '.npy'))
elif isinstance(node, EmbeddingLookUp) and self.config.prefetch:
node.event.sync()
nodeid = ctypes.c_int(node.inputs[0].id)
self.ps_comm.SparsePull(nodeid, node.inputs[1].get_next_arr(
self.name).handle, self.config.ps_map[node.inputs[0]].handle)
node.event.update()
self.ps_comm.BarrierWorker()
def recordLoads(self):
for node in self.config.ps_map:
node.event.sync()
self.ps_comm.getLoads()
def __del__(self):
if self.config.comp_stream is not None:
self.config.comp_stream.sync()
if self.config.h2d_stream is not None:
self.config.h2d_stream.sync()
if self.config.d2h_stream is not None:
self.config.d2h_stream.sync()
if self.config.nccl_stream is not None:
self.config.nccl_stream.sync()
for node in self.param_nodes:
if node.event:
node.event.sync()
if self.comm_mode in ('PS', 'Hybrid'):
worker_finish()
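# Minimal usage sketch for Executor (illustrative; assumes a device context has been
# set via the library's context mechanism and that `loss` and `train_op` are graph
# nodes built with this library's ops; the argument names are hypothetical).
def _executor_usage_example(loss, train_op, feed_dict):
    executor = Executor({'train': [loss, train_op]})
    return executor.run(name='train', feed_dict=feed_dict)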
class SubExecutor4Gpipe(object):
def __init__(self, name, eval_node_list, config):
"""
Parameters
----------
eval_node_list: list of nodes whose values need to be computed.
topo_order: list of
# constants
PROBE_SPACING = 100000
MAX_HEIGHT = 4
ABERRATION_HEIGHTS = {
'DEL': -1.0,
'DUP': +1.0,
'TDUP': +1.5,
'IDUP': +0.5,
'INV': -0.5,
'INS': -1.5,
'BND': -2.0,
'TRA': -2.0,
}
# Hard-coded data about GRCh37 and GRCh38 follows.
# If additional assemblies become relevant, use external .fai and .bed files instead.
CONTIG_LENGTHS_37 = {
'1': 249250621,
'2': 243199373,
'3': 198022430,
'4': 191154276,
'5': 180915260,
'6': 171115067,
'7': 159138663,
'8': 146364022,
'9': 141213431,
'10': 135534747,
'11': 135006516,
'12': 133851895,
'13': 115169878,
'14': 107349540,
'15': 102531392,
'16': 90354753,
'17': 81195210,
'18': 78077248,
'19': 59128983,
'20': 63025520,
'21': 48129895,
'22': 51304566,
'X': 155270560,
'Y': 59373566,
}
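# Illustrative helper (assumption: PROBE_SPACING is the distance in bp between
# consecutive synthetic probes along a contig), giving the number of probes that
# would cover a GRCh37 contig end to end.
def _probe_count(contig, contig_lengths=CONTIG_LENGTHS_37):
    return contig_lengths[contig] // PROBE_SPACING + 1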
CONTIG_LENGTHS_38 = {
'1': 248956422,
'2': 242193529,
'3': 198295559,
'4': 190214555,
'5': 181538259,
'6': 170805979,
'7': 159345973,
'8': 145138636,
'9': 138394717,
'10': 133797422,
'11': 135086622,
'12': 133275309,
'13': 114364328,
'14': 107043718,
'15': 101991189,
'16': 90338345,
'17': 83257441,
'18': 80373285,
'19': 58617616,
'20': 64444167,
'21': 46709983,
'22': 50818468,
'X': 156040895,
'Y': 57227415,
}
N_INTERVALS_38 = {
'1': [
(0, 10000),
(207666, 257666),
(297968, 347968),
(535988, 585988),
(2702781, 2746290),
(12954384, 13004384),
(16799163, 16849163),
(29552233, 29553835),
(121976459, 122026459),
(122224535, 122224635),
(122503147, 122503247),
(124785432, 124785532),
(124849129, 124849229),
(124932724, 124932824),
(124977944, 124978326),
(125013060, 125013223),
(125026048, 125026071),
(125029104, 125029169),
(125103213, 125103233),
(125130246, 125131847),
(125171347, 125173583),
(125184587, 143184587),
(223558935, 223608935),
(228558364, 228608364),
(248946422, 248956422),
],
'2': [
(0,10000),
(16145119, 16146119),
(89330679, 89530679),
(89685992, 89753992),
(90402511, 91402511),
(92138145, 92188145),
(94090557, 94140557),
(94293015, 94496015),
(97439618, 97489618),
(238903659, 238904047),
(242183529, 242193529),
],
'3': [
(0, 10000),
(90550102, 90550202),
(90565295, 90568828),
(90699772, 90699792),
(90722458, 90772458),
(91233586, 91233686),
(91247622, 91247722),
(91249905, 91256421),
(91257890, 91260180),
(91265381, 91276994),
(91282175, 91282734),
(91291069, 91291218),
(91345078, 91345173),
(91364131, 91364151),
(91438798, 91438818),
(91553319, 91553419),
(93655574, 93705574),
(198235559, 198285559),
(198285559, 198295559),
],
'4': [
(0, 10000),
(1429358, 1434206),
(1435794, 1441552),
(8797477, 8816477),
(9272916, 9322916),
(31819295, 31832569),
(31835775, 31835795),
(32833016, 32839016),
(49336924, 49486924),
(49658100, 49708100),
(49711961, 49712061),
(51743951, 51793951),
(58878793, 58921381),
(190123121, 190173121),
(190204555, 190214555),
],
'5': [
(0, 10000),
(17530548, 17580548),
(46435900, 46485900),
(46569062, 46569162),
(46796725, 46796825),
(47061288, 47061388),
(47069162, 47073730),
(47078603, 47078720),
(47079733, 47082080),
(47106894, 47106994),
(47153339, 47153439),
(47296069, 47296169),
(47300585, 47306086),
(47309084, 47309184),
(49591369, 49591469),
(49592920, 49599323),
(49600986, 49601086),
(49603131, 49609714),
(49611721, 49612096),
(49618994, 49621605),
(49628165, 49630729),
(49633793, 49641815),
(49646823, 49646923),
(49650573, 49656261),
(49661871, 49666173),
(49667431, 49667531),
(49721203, 49721303),
(50059807, 50109807),
(139452659, 139453659),
(155760324, 155761324),
(181478259, 181528259),
(181528259, 181538259),
],
'6':[
(0, 10000),
(10000, 60000),
(58453888, 58553888 ),
(59829934, 60229934 ),
(61357029, 61363066 ),
(61370554, 61371372 ),
(61378482, 61378582 ),
(61381502, 61381602 ),
(61393846, 61393946 ),
(61398161, 61398261 ),
(95020790, 95070790 ),
(167591393, 167641393),
(170745979, 170795979),
(170795979, 170805979),
],
'7': [
(0, 10000),
(237846, 240242),
(58119653, 58169653),
(60828234, 60878234),
(61063378, 61063426),
(61327788, 61377788),
(61528020, 61578020),
(61964169, 61967063),
(61976104, 62026104),
(62456779, 62506779),
(143650804, 143700804),
(159335973, 159345973),
],
'8': [
(0, 10000),
(10000, 60000),
(7617127, 7667127),
(12234345, 12284345),
(43983744, 44033744),
(45877265, 45927265),
(85664222, 85714222),
(145078636, 145128636),
(145128636, 145138636),
],
'9':[
(0, 10000),
(40529470, 40529480),
(40537052, 40537134),
(40547487, 40547497),
(40561938, 40561948),
(41225986, 41229378),
(41237752, 41238504),
(43222167, 43236167),
(43240559, 43240579),
(43254332, 43254352),
(43263290, 43263820),
(43268730, 43268750),
(43270944, 43274935),
(43276248, 43276457),
(43281323, 43282956),
(43332174, 43333269),
(43370405, 43371325),
(43377453, 43382279),
(43389535, 43389635),
(45518558, 60518558),
(60688432, 60738432),
(60779521, 60829521),
(61003887, 61053887),
(61231966, 61281966),
(61468808, 61518808),
(61735368, 61785368),
(62149738, 62249738),
(62748832, 62798832),
(62958371, 63008371),
(63202862, 63252862),
(63492264, 63542264),
(63918447, 63968447),
(64135013, 64185013),
(64215162, 64315162),
(64998124, 65048124),
(65080082, 65130082),
(65325123, 65375123),
(65595191, 65645191),
(66391387, 66591387),
(67920552, 68220552),
(134183092, 134185536),
(138334717, 138384717),
(138384717, 138394717),
],
'10':[
(0, 10000),
(38529907, 38573338),
(38906036, 38911580),
(38913438, 38918269),
(39229918, 39230136),
(39238955, 39239118),
(39254773, 39254793),
(39338430, 39338450),
(39341685, 39341705),
(39409792, 39410237),
(39479351, 39479371),
(39497198, 39497296),
(39570652, 39570672),
(39585287, 39590435),
(39593013, 39597435),
(39598812, 39598832),
(39602699, 39606089),
(39607431, 39607451),
(39613189, 39615618),
(39617141, 39617255),
(39622353, 39625274),
(39635037, 39635057),
(39636682, 39686682),
(39935900, 39936000),
(41497440, 41497540),
(41545720, 41545820),
(41593521, 41693521),
(41916265, 42066265),
(47780368, 47870368),
(124121200, 124121502),
(131597030, 131597130),
(133690466, 133740466),
(133787422, 133797422),
],
'11': [
(0, 10000),
(10000, 60000),
(50821348, 50871348),
(50871348, 51078348),
(51090317, 51090417),
(54342399, 54342499),
(54425074, 54525074),
(70955696, 71055696),
(87978202, 88002896),
(96566178, 96566364),
(135076622, 135086622),
],
'12': [
(0, 10000),
(7083650, 7084650),
(34719407, 34769407),
(34816611, 34816711),
(34820185, 34820285),
(34822289, 34829237),
(34832088, 34832188),
(34835195, 34835295),
(37185252, 37235252),
(37240944, 37245716),
(37255332, 37257055),
(37333222, 37333242),
(37334747, 37334767),
(37379851, 37380460),
(37460032, 37460128),
(132223362, 132224362),
(133265309, 133275309),
],
'13': [
(0, 10000),
(10000, 16000000),
(16022537, 16022637),
(16110659, 16110759),
(16164892, 16164992),
(16228527, 16228627),
(16249297, 16249397),
(16256067, 16256167),
(16259412, 16259512),
(16282073, 16282173),
(17416384, 17416484),
(17416824, 17416924),
(17417264, 17417364),
(17418562, 17418662),
(18051248, 18071248),
(18071248, 18171248),
(18358106, 18408106),
(86202979, 86252979),
(111703855, 111753855),
(111793441, 111843441),
(113673020, 113723020),
(114354328, 114364328),
],
'14': [
(0, 10000),
(10000, 16000000),
(16022537, 16022637),
(16053976, 16054459),
(16061677, 16061993),
(16086625, 16089562),
(16096530, 16096630),
(16105376, 16113232),
(16130858, 16133335),
(16140527, 16140627),
(16228649, 16228749),
(16282882, 16282982),
(16346517, 16346617),
(16367287, 16367387),
(16374057, 16374157),
(16377402, 16377502),
(16400063, 16400163),
(16404348, 16404448),
(17538659, 17538759),
(17539099, 17539199),
(17539539, 17539639),
(17540837, 17540937),
(18173523, 18223523),
(18712644, 18862644),
(19511713, 19611713),
(106883718, 107033718),
(107033718, 107043718),
],
'15': [
(0, 10000),
(10000, 17000000),
(17049135, 17049334),
(17076577, 17076597),
(17083573, 17083673),
(17498951, 17499051),
(18355008, 18355108),
(19725254, 19775254),
(20689304, 20729746),
(21193490, 21242090),
(21778502, 21828502),
(22308242, 22358242),
(23226874, 23276874),
(84270066, 84320066),
(101981189, 101991189),
],
'16': [
(0, 10000),
(18436486, 18486486),
(33214595, 33264595),
(33392411, 33442411),
(34289329, 34339329),
(34521510, 34571510),
(34576805, 34580965),
(34584085, 34584622),
(36260386, 36260628),
(36261158, 36311158),
(36334460, 36334560),
(36337566, 36337666),
(38265669, 38265769),
(38269096, 38275758),
(38280682, 46280682),
(46280682, 46380682),
(90228345, 90328345),
(90328345, 90338345),
],
'17': [
(0, 10000),
(10000, 60000),
(448188, 488987),
(490395, 491111),
(21795850, 21814103),
(21860937, 21860957),
(21976511, 21976531),
(21983452, 21983554),
(21984549, 21985100),
(21992061, 22042061),
(22089188, 22089410),
(22763679, 22813679),
(23194918, 23195018),
(26566633, 26566733),
(26616164, 26616264),
(26627010, 26627349),
(26638554, 26638627),
(26640334, 26640620),
(26643468, 26643843),
(26698590, 26698998),
(26720420, 26721376),
(26735204, 26735774),
(26805755, 26805775),
(26820065, 26820266),
(26859724, 26860166),
(26876740, 26876850),
(26880254, 26880354),
(26885980, 26935980),
(81742542, 81792542),
(81796281, 81797727),
(81798717, 81799133),
(83247441, 83257441),
],
'18': [
(0, 10000),
(15410899, 15460899),
(15780377, 15780477),
(15788380, 15791047),
(15797755, 15797855),
(20561439, 20561539),
(20564714, 20571466),
(20582635, 20582735),
(20603147, 20603247),
(20696289, 20696389),
(20736025, 20736125),
(20813083, 20813183),
(20830724, 20831341),
(20835547, 20835592),
(20839697, 20839797),
(20861206, 20911206),
(46969912, 47019912),
(54536574, 54537528),
(80263285, 80363285),
(80363285, 80373285),
],
'19': [
(24448980, 24498980),
(24552652, 24552752),
(24891256, 24891356),
(24895790, 24895890),
(24898313, 24904771),
(24908589, 24908689),
(27190874, 27240874),
(58607616, 58617616),
],
'20': [
(0, 10000),
(10000, 60000),
(63215, 63840),
(66235, 66335),
(26348365, 26348390),
(26364240, 26365414),
(26382164, 26382616),
(26386232, 26436232),
(26586955, 26587055),
(26590875, 26596363),
(26608045, 26608145),
(28494539, 28494639),
(28499358, 28504764),
(28508897, 28508997),
(28556953, 28557053),
(28646195, 28646295),
(28648008, 28648108),
(28728874, 28728974),
(28751119, 28752590),
(28754750, 28754770),
(28757831, 28757851),
(28790010, 28790158),
(28820603, 28820663),
(28843401, 28859997),
(28861347, 28861367),
(28867524, 28868452),
(28875884, 28875904),
(28889198, 28889218),
(28890335, 28896362),
(29125693, 29125793),
(29204668, 29204768),
(29271546, 29271826),
(29307456, 29307476),
(29315342, 29315821),
(29362154, 29362183),
(29412507, 29413577),
(29447838, 29447883),
(29452158, 29452178),
(29538733, 29538783),
(29540234, 29540284),
(29556103, 29556141),
(29562970, 29563363),
(29564411, 29565353),
(29592644, 29592737),
(29651590, 29651610),
(29697363, 29697630),
(29884750, 29884850),
(29917304, 29917404),
(30038348, 30088348),
(30425128, 30456077),
(30761898, 30811898),
(31001508, 31051508),
(31107036, 31157036),
(31159119, 31161625),
(64334167, 64434167),
(64434167, 64444167),
],
'21': [
(0, 10000),
(10000, 5010000),
(5166246, 5216246),
(5393558, 5443558),
(5449012, 5499012),
(5627596, 5677596),
(5796009, 5846009),
(5916593, 5966593),
(6161371, 6211371),
(6377258, 6427258),
(6580181, 6630181),
(6739085, 6789085),
(6934219, 6984219),
(7149527, 7199527),
(7327865, 7377865),
(7500890, 7550890),
(7693700, 7743700),
(7865746, 7915746),
(8049839, 8099839),
(8260971, 8310971),
(8472360, 8522360),
(8706715, 8756715),
(8886604, 8986604),
(9196087, 9246087),
(9377143, 9527143),
(10169868, 10269868),
(10274327, 10324327),
(10814560, 10864560),
(10887097, 10887197),
(10975219, 10975319),
(11029452, 11029552),
(11093087, 11093187),
(11113857, 11113957),
(11120627, 11120727),
(11123972, 11124072),
(11146633, 11146733),
(12280944, 12281044),
(12281384, 12281484),
(12281824, 12281924),
(12283122, 12283222),
(12915808, 12965808),
(41584292, 41584392),
(43212462, 43262462),
(46699983, 46709983),
],
'22': [
(0, 10000),
(10000, 10510000),
(10784643, 10834643),
(10874572, 10924572),
(10966724, 11016724),
(11068987, 11118987),
(11160921, 11210921),
(11378056, 11428056),
(11497337, 11547337),
(11631288, 11681288),
(11724629, 11774629),
(11977555, 12027555),
(12225588, 12275588),
(12438690, 12488690),
(12641730, 12691730),
(12726204, 12776204),
(12818137, 12868137),
(12904788, 12954788),
(12977325, 12977425),
(12986171, 12994027),
(13011653, 13014130),
(13021322, 13021422),
(13109444, 13109544),
(13163677, 13163777),
(13227312, 13227412),
(13248082, 13248182),
(13254852, 13254952),
(13258197, 13258297),
(13280858, 13280958),
(13285143, 13285243),
(14419454, 14419554),
(14419894, 14419994),
(14420334, 14420434),
(14421632, 14421732),
(15054318, 15154318),
(16279672, 16302843),
(16304296, 16305427),
(16307048, 16307605),
(16310302, 16310402),
(16313516, 16314010),
(18239129, 18339129),
(18433513, 18483513),
(18659564, 18709564),
(49973865, 49975365),
(50808468, 50818468),
],
'X': [
(0, 10000),
(44821, 94821),
(133871, 222346),
(226276, 226351),
(1949345, 2132994),
(2137388, 2137488),
(37099262, 37285837),
(49348394, 49528394),
(50228964, 50278964),
(58555579, 58605579),
(62412542, 62462542),
(114281198, 114331198),
(115738949, 115838949),
(116557779, 116595566),
(120879381, 120929381),
(144425606, 144475606),
(156030895, 156040895),
],
'Y': [
(0, 10000),
(44821, 94821),
(133871, 222346),
(226276, 226351),
(1949345, 2132994),
(2137388, 2137488),
(9046914, 9055174),
(9057608, 9107608),
(9111868, 9112715),
(9114319, 9116371),
(9403713, 9453713),
(10266944, 10316944),
(10544039, 10594039),
(10633440, 10645833),
(10649989, 10651421),
(10669737, 10670732),
(10674058, 10676544),
(10679715, 10682442),
(10691573, 10691902),
(10694192, 10744192),
(10747280, 10747305),
(10809671, 10810479),
(10816758, 10817292),
(10852900, 10855135),
(10871218, 10871897),
(10890419, 10891942),
(10896525, 10898184),
(10908519, 10909101),
(10922486, 10923564),
(10956868, 10957767),
(10961625, 10962533),
(10965694, 10967284),
(10969992, 10970620),
(10986652, 10986677),
(11002276, 11003059),
(11012528, 11013046),
(11016992, 11017247),
(11023374, 11024266),
(11028342, 11029160),
(11036252, 11037032),
(11592902, 11642902),
(11647442, 11647709),
(11653064, 11653102),
(11660374, 11662181),
(11663986, 11664006),
(11669948, 11670088),
(11671800, 11671820),
(11673795, 11674123),
(20207793, 20257793),
(21739542, 21741441),
(21747863, 21748371),
(21750013, 21750314),
(21789281, 21805281),
(26673214, 56673214),
(56771509, 56821509),
(57217415, 57227415),
],
}
# Intervals at which the reference contains N bases
N_INTERVALS_37 = {
'1': [
(0, 10000),
(177417, 227417),
(267719, 317719),
(471368, 521368),
(2634220, 2684220),
(3845268, 3995268),
(13052998, 13102998),
(13219912, 13319912),
(13557162, 13607162),
(17125658, 17175658),
(29878082, 30028082),
(103863906, 103913906),
(120697156, 120747156),
(120936695, 121086695),
(121485434, 142535434),
(142731022, 142781022),
(142967761, 143117761),
(143292816, 143342816),
(143544525, 143644525),
(143771002, 143871002),
(144095783, 144145783),
(144224481, 144274481),
(144401744, 144451744),
(144622413, 144672413),
(144710724, 144810724),
(145833118, 145883118),
(146164650, 146214650),
(146253299, 146303299),
(148026038, 148176038),
(148361358, 148511358),
(148684147, 148734147),
(148954460, 149004460),
(149459645, 149509645),
(205922707, 206072707),
(206332221, 206482221),
(223747846, 223797846),
(235192211, 235242211),
(248908210, 249058210),
(249240621, 249250621),
],
'2': [
(0, 10000),
(3529312, 3579312),
(5018788, 5118788),
(16279724, 16329724),
(21153113, 21178113),
(31725939, 31726790),
(33092197, 33093197),
(33141692, 33142692),
(87668206, 87718206),
(89630436, 89830436),
(90321525, 90371525),
(90545103, 91595103),
(92326171, 95326171),
(110109337, 110251337),
(149690582, 149790582),
(234003741, 234053741),
(239801978, 239831978),
(240784132, 240809132),
(243102476, 243152476),
(243189373, 243199373),
],
'3': [
(0, 60000),
(66170270, 66270270),
(90504854, 93504854),
(194041961, 194047251),
(197962430, 198022430),
],
'4': [
(0, 10000),
(1423146, 1478646),
(8799203, 8818203),
(9274642, 9324642),
(31820917, 31837417),
(32834638, 32840638),
(40296396, 40297096),
(49338941, 49488941),
(49660117, 52660117),
(59739333, 59789333),
(75427379, 75452279),
(191044276, 191154276),
],
'5': [
(0, 10000),
(17530657, 17580657),
(46405641, 49405641),
(91636128, 91686128),
(138787073, 138837073),
(155138727, 155188727),
(180905260, 180915260),
],
'6': [
(0, 60000),
(58087659, 58137659),
(58780166, 61880166),
(62128589, 62178589),
(95680543, 95830543),
(157559467, 157609467),
(157641300, 157691300),
(167942073, 168042073),
(170279972, 170329972),
(171055067, 171115067),
],
'7': [
(0, 10000),
(232484, 282484),
(50370631, 50410631),
(58054331, 61054331),
(61310513, 61360513),
(61460465, 61510465),
(61677020, 61727020),
(61917157, 61967157),
(74715724, 74765724),
(100556043, 100606043),
(130154523, 130254523),
(139379377, 139404377),
(142048195, 142098195),
(142276197, 142326197),
(143347897, 143397897),
(154270634, 154370634),
(159128663, 159138663),
],
'8': [
(0, 10000),
(7474649, 7524649),
(12091854, 12141854),
(43838887, 46838887),
(48130499, 48135599),
(86576451, 86726451),
(142766515, 142816515),
(145332588, 145432588),
(146304022, 146364022),
],
'9': [
(0, 10000),
(39663686, 39713686),
(39974796, 40024796),
(40233029, 40283029),
(40425834, 40475834),
(40940341, 40990341),
(41143214, 41193214),
(41365793, 41415793),
(42613955, 42663955),
(43213698, 43313698),
(43946569, 43996569),
(44676646, 44726646),
(44908293, 44958293),
(45250203, 45350203),
(45815521, 45865521),
(46216430, 46266430),
(46461039, 46561039),
(47060133, 47160133),
(47317679, 65467679),
(65918360, 65968360),
(66192215, 66242215),
(66404656, 66454656),
(66614195, 66664195),
(66863343, 66913343),
(67107834, 67207834),
(67366296, 67516296),
(67987998, 68137998),
(68514181, 68664181),
(68838946, 68988946),
(69278385, 69328385),
(70010542, 70060542),
(70218729, 70318729),
(70506535, 70556535),
(70735468, 70835468),
(92343416, 92443416),
(92528796, 92678796),
(133073060, 133223060),
(137041193, 137091193),
(139166997, 139216997),
(141153431, 141213431),
],
'10': [
(0, 60000),
(17974675, 18024675),
(38818835, 38868835),
(39154935, 42354935),
(42546687, 42596687),
(46426964, 46476964),
(47429169, 47529169),
(47792476, 47892476),
(48055707, 48105707),
(49095536, 49195536),
(51137410, 51187410),
(51398845, 51448845),
(125869472, 125919472),
(128616069, 128766069),
(133381404, 133431404),
(133677527, 133727527),
(135524747, 135534747),
],
'11': [
(0, 60000),
(1162759, 1212759),
(50783853, 51090853),
(51594205, 54694205),
(69089801, 69139801),
(69724695, 69774695),
(87688378, 87738378),
(96287584, 96437584),
(134946516, 135006516),
],
'12': [
(0, 60000),
(95739, 145739),
(7189876, 7239876),
(34856694, 37856694),
(109373470, 109423470),
(121965036, 121965236),
(122530623, 122580623),
(123928080, 123928280),
(132706992, 132806992),
(133841895, 133851895),
],
'13': [
(0, 19020000),
(86760324, 86910324),
(112353994, 112503994),
(114325993, 114425993),
(114639948, 114739948),
(115109878, 115169878),
],
'14': [
(0, 19000000),
(107289540, 107349540),
],
'15': [
(0, 20000000),
(20894633, 20935075),
(21398819, 21885000),
(22212114, 22262114),
(22596193, 22646193),
(23514853, 23564853),
(29159443, 29209443),
(82829645, 82879645),
(84984473, 85034473),
(102521392, 102531392),
],
'16': [
(0, 60000),
(8636921, 8686921),
(34023150, 34173150),
(35285801, 46385801),
(88389383, 88439383),
(90294753, 90354753),
],
'17': [
(296626, 396626),
(21566608, 21666608),
(22263006, 25263006),
(34675848, 34725848),
(62410760, 62460760),
(77546461, 77596461),
(79709049, 79759049),
],
'18': [
(0, 10000),
(15410898, 18510898),
(52059136, 52209136),
(72283353, 72333353),
(75721820, 75771820),
(78017248, 78077248),
],
'19': [
(0, 60000),
(7346004, 7396004),
(8687198, 8737198),
(20523415, 20573415),
(24631782, 27731782),
(59118983, 59128983),
],
'20': [
(0, 60000),
(26319569, 29419569),
(29653908, 29803908),
(34897085, 34947085),
(61091437, 61141437),
(61213369, 61263369),
(62965520, 63025520),
],
'21': [
(0, 9411193),
(9595548, 9645548),
(9775437, 9825437),
(10034920, 10084920),
(10215976, 10365976),
(10647896, 10697896),
(11188129, 14338129),
(42955559, 43005559),
(43226828, 43227328),
(43249342, 43250842),
(44632664, 44682664),
(48119895, 48129895),
],
'22': [
(0, 16050000),
(16697850, 16847850),
(20509431, 20609431),
(50364777, 50414777),
(51244566, 51304566),
],
'X': [
(0, 60000),
(94821, 144821),
(231384, 281384),
(1047557, 1097557),
(1134113, 1184113),
(1264234, 1314234),
(2068238, 2118238),
(7623882, 7673882),
(10738674, 10788674),
(37098256, 37148256),
(49242997, 49292997),
(49974173, 50024173),
(52395914, 52445914),
(58582012, 61682012),
(76653692, 76703692),
(113517668, 113567668),
(115682290, 115732290),
(120013235, 120063235),
(143507324, 143557324),
(148906424, 148956424),
(149032062, 149082062),
(152277099, 152327099),
(155260560, 155270560),
],
'Y': [
(0, 2649520),
(8914955, 8964955),
(9241322, 9291322),
(10104553, 13104553),
(13143954, 13193954),
(13748578, 13798578),
(20143885, 20193885),
(22369679, 22419679),
(23901428, 23951428),
(28819361, 58819361),
(58917656, 58967656),
(59034049, 59373566),
]
}
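# Minimal helper sketch (not part of the original module; added for
# illustration): test whether a query region overlaps any reference N gap
# listed in the interval tables above.
def _overlaps_n_interval(chrom, start, end, intervals=N_INTERVALS_37):
    """Return True if the half-open region [start, end) overlaps an N interval."""
    return any(start < gap_end and end > gap_start
               for gap_start, gap_end in intervals.get(chrom, []))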
CHROM_RENAME = {'X': '23', 'Y': '24'}
CGH_TEMPLATE_37 = u"""
<data formatVersion="2">
<pgdData><pgdDataEntry key="SPECIMEN_TYPE" value="BLASTOMERE"/></pgdData>
<noResults>
</noResults>
<pgd_reagents/>
<cgh mother="-1" father="-1" genomeBuild="hg19" softwareVersion="4.8.32" batched="false">
<submission design="031035" feFile="{}.txt" cghFile="{}.cgh" scanDate="1462520414000" barcode="{}" sampleCy3="true">
<notes/>
<sample sampleId="{}" male="{}"><phenotype/></sample>
<reference sampleId="Promega {}" male="{}"><phenotype/></reference>
<extra>
<datum category="Nanodrop" type="Sample DNA (ng)" dataType="Float"/>
<datum category="Sample Extraction" type="Sample Arrival Date" dataType="Date"/>
<datum category="Sample Extraction" type="Specimen Type" dataType="List">Blood</datum>
<datum category="Labelling" type="Lab User" dataType="String"/>
<datum category="Hyb & Wash" type="Cot1 Batch" dataType="String"/>
<datum category="Sample Extraction" type="Extraction Method" dataType="String"/>
<datum category="General" type="Reference Concentration" dataType="Float"/>
<datum category="Sample Extraction" type="A260/A280" dataType="Float"/>
<datum category="General" type="Assigned Technologist" dataType="String">MF</datum>
<datum category="Nanodrop" type="Reference DNA (pmoles)" dataType="Float"/>
<datum category="Labelling" type="Columns Used" dataType="List">Qiagen</datum>
<datum category="Nanodrop" type="Sample DNA (A260/A280)" dataType="Float"/>
<datum category="Labelling" type="Column Batch No" dataType="String"/>
<datum category="Sample Extraction" type="Extracted By" dataType="String"/>
<datum category="Hyb & Wash" type="Hyb Protocol" dataType="String"/>
<datum category="Labelling" type="Experiment Date" dataType="Date"/>
<datum category="General" type="Case Status" dataType="List"/>
<datum category="Labelling" type="Lab Notes" dataType="Text"/>
<datum category="Hyb & Wash" type="Hyb Buffer Batch" dataType="String"/>
<datum category="Nanodrop" type="Reference DNA (A260/A280)" dataType="Float"/>
<datum category="Labelling" type="Labelling Reagent Batch No" dataType="String"/>
<datum category="Hyb & Wash" type="Wash Protocol" dataType="String"/>
<datum category="Labelling" type="Labelling Protocol" dataType="List">Enzo</datum>
<datum category="Nanodrop" type="Reference DNA (ng)" dataType="Float"/>
<datum category="Nanodrop" type="Sample DNA (pmoles)" dataType="Float"/>
</extra>
</submission>
<excludedRegions>
</excludedRegions>
<qc>
<aqf key="SPIKES" value="null"/>
<aqf key="DLRSPREAD" value="0.1"/>
<aqf key="RED_SIGNAL_INTENSITY" value="1000.0"/>
<aqf key="GREEN_SIGNAL_INTENSITY" value="1000.0"/>
<aqf key="BG_NOISE_RED" value="5.0"/>
<aqf key="BG_NOISE_GREEN" value="5.0"/>
<aqf key="RED_SNR" value="100.0"/>
<aqf key="GREEN_SNR" value="100.0"/>
<aqf key="COMPARATIVE_SIGNAL_INTENSITY" value="0.11111111111"/>
<aqf key="REPRODUCIBILITY_GREEN" value="0.1111111"/>
<aqf key="REPRODUCIBILITY_RED" value="0.1111111"/>
<aqf key="NEG_CONTROL_RED" value="1.1111111"/>
<aqf key="NEG_CONTROL_GREEN" value="1.1111111"/>
<aqf key="FLAG_PERC" value="0.00722363"/>
<aqf key="SAT_PERC" value="0.0"/>
<aqf key="WAVINESS" value="0.0011111"/>
<aqf key="SNP_TROUGH_PEAK_RATIO" value="null"/>
<aqf key="SNP_GREY_AREA" value="null"/>
<aqf key="SNP_Red_Signal_Intensity" value="NaN"/>
<aqf key="SNP_Green_Signal_Intensity" value="NaN"/>
<aqf key="SNP_Ratio_Separation" value="0.0"/>
<aqf key="Percentage_Homozygosity" value="NaN"/>
<aqf key="SD" value="0.111111"/>
</qc>
<probes></probes>
<segmentation type="NORMALIZED"></segmentation>
</cgh>
</data>
"""
CGH_TEMPLATE_38 = u"""
<data formatVersion="2">
<pgdData><pgdDataEntry key="SPECIMEN_TYPE" value="BLASTOMERE"/></pgdData>
<noResults>
</noResults>
<pgd_reagents/>
<cgh mother="-1" father="-1" genomeBuild="hg38" softwareVersion="4.10.41" batched="false">
<submission design="031035" feFile="{}.txt" cghFile="{}.cgh" scanDate="1462520414000" barcode="{}" sampleCy3="true">
<notes/>
<sample sampleId="{}" male="{}"><phenotype/></sample>
<reference sampleId="Promega {}" male="{}"><phenotype/></reference>
<extra>
<datum category="Nanodrop" type="Sample DNA (ng)" dataType="Float"/>
<datum category="Sample Extraction" type="Sample Arrival | |
try:
self._Reset()
if self._Scanbeam is None:
return True
botY = self._PopScanbeam()
while True:
self._InsertLocalMinimaIntoAEL(botY)
self._HorzJoins = None
self._ProcessHorizontals()
topY = self._PopScanbeam()
if not self._ProcessIntersections(botY, topY):
return False
self._ProcessEdgesAtTopOfScanbeam(topY)
botY = topY
if self._Scanbeam is None and self._CurrentLocMin is None:
break
for outRec in self._PolyOutList:
if outRec.pts is None:
continue
_FixupOutPolygon(outRec)
if outRec.pts is None:
continue
if (outRec.isHole ^ self.ReverseSolution) == (
self._Area(outRec.pts) > 0.0
):
_ReversePolyPtLinks(outRec.pts)
if self._JoinList is not None:
self._JoinCommonEdges()
if self.ForceSimple:
self._DoSimplePolygons()
return True
finally:
self._JoinList = None
self._HorzJoins = None
# except:
# return False
def Execute(
self,
clipType,
solution,
subjFillType=PolyFillType.EvenOdd,
clipFillType=PolyFillType.EvenOdd,
):
if self._ExecuteLocked:
return False
try:
self._ExecuteLocked = True
self._UsingPolyTree = True
del solution[:]
self._SubjFillType = subjFillType
self._ClipFillType = clipFillType
self._ClipType = clipType
result = self._ExecuteInternal()
if result:
self._BuildResult(solution)
finally:
self._ExecuteLocked = False
self._UsingPolyTree = False
return result
def Execute2(
self,
clipType,
solutionTree,
subjFillType=PolyFillType.EvenOdd,
clipFillType=PolyFillType.EvenOdd,
):
if self._ExecuteLocked:
return False
try:
self._ExecuteLocked = True
self._UsingPolyTree = True
solutionTree.Clear()
self._SubjFillType = subjFillType
self._ClipFillType = clipFillType
self._ClipType = clipType
result = self._ExecuteInternal()
if result:
self._BuildResult2(solutionTree)
finally:
self._ExecuteLocked = False
self._UsingPolyTree = False
return result
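# Hedged usage sketch (comments only): the class name "Clipper", AddPolygons,
# PolyType and ClipType.Intersection follow the conventional Clipper port API
# and are assumed to be defined earlier in this module; they are not shown in
# this excerpt.
#
#   c = Clipper()
#   c.AddPolygons(subject_polys, PolyType.Subject)
#   c.AddPolygons(clip_polys, PolyType.Clip)
#   solution = []
#   c.Execute(ClipType.Intersection, solution,
#             PolyFillType.EvenOdd, PolyFillType.EvenOdd)
#   # `solution` now holds the clipped polygons as lists of points.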
def _BuildResult(self, polygons):
for outRec in self._PolyOutList:
if outRec is None:
continue
cnt = _PointCount(outRec.pts)
if cnt < 3:
continue
poly = []
op = outRec.pts
for _ in range(cnt):
poly.append(op.pt)
op = op.prevOp
polygons.append(poly)
return
def _BuildResult2(self, polyTree):
for outRec in self._PolyOutList:
if outRec is None:
continue
cnt = _PointCount(outRec.pts)
if cnt < 3:
continue
_FixHoleLinkage(outRec)
# add nodes to _AllNodes list ...
polyNode = PolyNode()
polyTree._AllNodes.append(polyNode)
outRec.PolyNode = polyNode
op = outRec.pts
while True:
polyNode.Contour.append(op.pt)
op = op.prevOp
if op == outRec.pts:
break
# build the tree ...
for outRec in self._PolyOutList:
if outRec.PolyNode is None:
continue
if outRec.FirstLeft is None:
polyTree._AddChild(outRec.PolyNode)
else:
outRec.FirstLeft.PolyNode._AddChild(outRec.PolyNode)
return
# ===============================================================================
# OffsetPolygons (+ ancillary functions)
# ===============================================================================
def _GetUnitNormal(pt1, pt2):
if pt2.x == pt1.x and pt2.y == pt1.y:
return FloatPoint(0.0, 0.0)
dx = float(pt2.x - pt1.x)
dy = float(pt2.y - pt1.y)
f = 1.0 / math.hypot(dx, dy)
dx = float(dx) * f
dy = float(dy) * f
return FloatPoint(dy, -dx)
def _GetBounds(pts):
left = None
for poly in pts:
for pt in poly:
left = pt.x
top = pt.y
right = pt.x
bottom = pt.y
break
break
for poly in pts:
for pt in poly:
if pt.x < left:
left = pt.x
if pt.x > right:
right = pt.x
if pt.y < top:
top = pt.y
if pt.y > bottom:
bottom = pt.y
if left is None:
return Rect(0, 0, 0, 0)
else:
return Rect(left, top, right, bottom)
def _GetLowestPt(poly):
# precondition: poly must not be empty
result = poly[0]
for pt in poly:
if pt.y > result.y or (pt.y == result.y and pt.x < result.x):
result = pt
return result
def _StripDupPts(poly):
    if poly == []:
        return poly
    # Remove consecutive duplicates; pop() shortens the list, so walk it with
    # an explicit index instead of a fixed range.
    i = 1
    while i < len(poly):
        if _PointsEqual(poly[i - 1], poly[i]):
            poly.pop(i)
        else:
            i += 1
    i = len(poly) - 1
    while i > 0 and _PointsEqual(poly[i], poly[0]):
        poly.pop(i)
        i -= 1
    return poly
def _OffsetInternal(
polys, isPolygon, delta, jointype=JoinType.Square, endtype=EndType.Square, limit=0.0
):
def _DoSquare(pt):
# see offset_triginometry.svg in the documentation folder ...
dx = math.tan(
math.atan2(sinA, Normals[k].x * Normals[j].x + Normals[k].y * Normals[j].y)
/ 4
)
result.append(
Point(
round(pt.x + delta * (Normals[k].x - Normals[k].y * dx)),
round(pt.y + delta * (Normals[k].y + Normals[k].x * dx)),
)
)
result.append(
Point(
round(pt.x + delta * (Normals[j].x + Normals[j].y * dx)),
round(pt.y + delta * (Normals[j].y - Normals[j].x * dx)),
)
)
return
def _DoMiter(pt, r):
q = delta / r
result.append(
Point(
round(pt.x + (Normals[k].x + Normals[j].x) * q),
round(pt.y + (Normals[k].y + Normals[j].y) * q),
)
)
return
def _DoRound(pt):
a = math.atan2(sinA, Normals[k].x * Normals[j].x + Normals[k].y * Normals[j].y)
steps = round(step360 * abs(a))
X, Y = Normals[k].x, Normals[k].y
for _ in range(steps):
result.append(Point(round(pt.x + X * delta), round(pt.y + Y * delta)))
X2 = X
X = X * mcos - msin * Y
Y = X2 * msin + Y * mcos
result.append(
Point(
round(pt.x + Normals[j].x * delta), round(pt.y + Normals[j].y * delta)
)
)
return
def GetSin():
result = Normals[k].x * Normals[j].y - Normals[j].x * Normals[k].y
if result > 1.0:
result = 1.0
elif result < -1.0:
result = -1.0
return result
def _OffsetPoint(jointype):
if sinA * delta < 0:
result.append(
Point(
round(pts[j].x + Normals[k].x * delta),
round(pts[j].y + Normals[k].y * delta),
)
)
result.append(pts[j])
result.append(
Point(
round(pts[j].x + Normals[j].x * delta),
round(pts[j].y + Normals[j].y * delta),
)
)
elif jointype == JoinType.Miter:
r = 1.0 + (Normals[j].x * Normals[k].x + Normals[j].y * Normals[k].y)
if r >= miterLim:
_DoMiter(pts[j], r)
else:
_DoSquare(pts[j])
elif jointype == JoinType.Square:
_DoSquare(pts[j])
else:
_DoRound(pts[j])
return j
if delta == 0:
return polys
if not isPolygon and delta < 0:
delta = -delta
if jointype == JoinType.Miter:
# miterLim: see offset_triginometry3.svg in the documentation folder ...
if limit > 2:
miterLim = 2 / (limit * limit)
else:
miterLim = 0.5
if endtype == EndType.Round:
limit = 0.25
if jointype == JoinType.Round or endtype == EndType.Round:
if limit <= 0:
limit = 0.25
elif limit > abs(delta) * 0.25:
limit = abs(delta) * 0.25
# step360: see offset_triginometry2.svg in the documentation folder ...
step360 = math.pi / math.acos(1 - limit / abs(delta))
msin = math.sin(2 * math.pi / step360)
mcos = math.cos(2 * math.pi / step360)
step360 /= math.pi * 2
if delta < 0:
msin = -msin
res = []
ppts = polys[:]
for pts in ppts:
Normals = []
result = []
cnt = len(pts)
if cnt == 0 or cnt < 3 and delta <= 0:
continue
if cnt == 1:
if jointype == JoinType.Round:
X, Y = 1.0, 0.0
for _ in range(round(step360 * 2 * math.pi)):
result.append(
Point(round(pts[0].x + X * delta), round(pts[0].y + Y * delta))
)
X2 = X
X = X * mcos - msin * Y
Y = X2 * msin + Y * mcos
else:
X, Y = -1.0, -1.0
for _ in range(4):
result.append(
Point(round(pts[0].x + X * delta), round(pts[0].y + Y * delta))
)
if X < 0:
X = 1
elif Y < 0:
Y = 1
else:
X = -1
continue
forceClose = _PointsEqual(pts[0], pts[cnt - 1])
if forceClose:
cnt -= 1
for j in range(cnt - 1):
Normals.append(_GetUnitNormal(pts[j], pts[j + 1]))
if isPolygon or forceClose:
Normals.append(_GetUnitNormal(pts[cnt - 1], pts[0]))
else:
Normals.append(Normals[cnt - 2])
if isPolygon or forceClose:
k = cnt - 1
for j in range(cnt):
sinA = GetSin()
k = _OffsetPoint(jointype)
res.append(result)
if not isPolygon:
result = []
delta = -delta
k = cnt - 1
for j in range(cnt):
sinA = GetSin()
k = _OffsetPoint(jointype)
delta = -delta
res.append(result[::-1])
else:
# offset the polyline going forward ...
k = 0
for j in range(1, cnt - 1):
sinA = GetSin()
k = _OffsetPoint(jointype)
# handle the end (butt, round or square) ...
if endtype == EndType.Butt:
j = cnt - 1
pt1 = Point(
round(float(pts[j].x) + Normals[j].x * delta),
round(float(pts[j].y) + Normals[j].y * delta),
)
result.append(pt1)
pt1 = Point(
round(float(pts[j].x) - Normals[j].x * delta),
round(float(pts[j].y) - Normals[j].y * delta),
)
result.append(pt1)
else:
j = cnt - 1
k = cnt - 2
Normals[j] = FloatPoint(-Normals[j].x, -Normals[j].y)
if endtype == EndType.Square:
_DoSquare(pts[j])
else:
_DoRound(pts[j])
# re-build Normals ...
for j in range(cnt - 1, 0, -1):
Normals[j] = FloatPoint(-Normals[j - 1].x, -Normals[j - 1].y)
Normals[0] = FloatPoint(-Normals[1].x, -Normals[1].y)
# offset the polyline going backward ...
k = cnt - 1
for j in range(cnt - 2, 0, -1):
sinA = GetSin()
k = _OffsetPoint(jointype)
# finally handle the start (butt, round or square) ...
if endtype == EndType.Butt:
pt1 = Point(
round(float(pts[0].x) - Normals[0].x * delta),
round(float(pts[0].y) - Normals[0].y * delta),
)
result.append(pt1)
pt1 = Point(
round(float(pts[0].x) + Normals[0].x * delta),
round(float(pts[0].y) + Normals[0].y * delta),
)
result.append(pt1)
else:
j = 0
k = 1
if endtype == EndType.Square:
_DoSquare(pts[0])
else:
_DoRound(pts[0])
| |
#!/usr/bin/env python3
import re
import sys
import os
import argparse
import logging
import itertools
import collections.abc
from collections import defaultdict
from lxml import etree
# Typing
import io
from typing import Sequence, Mapping, Iterator, Generic, AbstractSet, Callable
from typing import Any, List, Tuple
from typing import TypeVar, Union
from typing import TextIO
from numbers import Number
from lxml.etree import _Element as Element
from lxml.etree import XPath
TEX_ESCAPES = [
('\\', r'\textbackslash'),
('&', r'\&'),
('%', r'\%'),
('$', r'\$'),
('#', r'\#'),
('_', r'\_'),
('{', r'\{'),
('}', r'\}'),
('~', r'\textasciitilde{}'),
('^', r'\textasciicircum{}'),
(r'\textbackslash', r'\textbackslash{}'),
]
KNOWN_FORMATS = {
'mono': (r'\texttt{%s}', True),
'verb': (r'\verb|%s|', False),
'italic': (r'\textit{%s}', False),
'math': (r'$ %s $', True),
'bold': (r'\textbf{%s}', False),
}
EMPTY_ELEMENT = etree.Element('empty')
SORTING_TRANSFORMS = {
'ident': lambda x: x,
'ip': lambda x: [int(i) for i in x.split('.')],
'port': lambda x: int(x.split('/')[0]) if x else -1
}
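# e.g. SORTING_TRANSFORMS['ip']('10.0.2.15') == [10, 0, 2, 15]
#      SORTING_TRANSFORMS['port']('443/tcp') == 443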
logger = logging.getLogger('xpath2tex')
T = TypeVar('T')
class AnyMap(collections.abc.Mapping, Generic[T]):
def __init__(self, item: T):
self.item = item
def __getitem__(self, value: Any) -> T:
return self.item
def __len__(self) -> int:
return 1
def __iter__(self) -> Iterator[Any]:
return iter((None,))
def __repr__(self) -> str:
return 'AnyMap({!r})'.format(self.item)
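# AnyMap is a constant mapping: AnyMap(fmt)[any_key] == fmt for every key.
# parse_formats() below uses it when a format or escape flag should apply to
# all columns.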
def ensure_str(item) -> str:
"""Ensures that the element passed in turns into a string.
First, if given a sequence of length 1, extract the element.
If given `str`, it returns `item`.
If given `bytes`, it decodes it with UTF-8.
If given `etree._Element`, it gets the internal text.
If given a number, return `str(item)`.
Otherwise, give a warning and return `str(item)`.
"""
if isinstance(item, Sequence):
if len(item) == 1:
item = item[0]
elif len(item) == 0:
return ''
elif not isinstance(item, (str, bytes)):
return ', '.join(ensure_str(i) for i in item)
if item is None:
return ''
if isinstance(item, str):
return item
if isinstance(item, bytes):
return item.decode()
if isinstance(item, Element):
return ensure_str(item.text)
if isinstance(item, Number):
return str(item)
logger.warning('Could not identify string conversion of %r', item)
return str(item)
def query_row(
row: Element,
col_xpaths: Sequence[Sequence[XPath]],
col_relatives: Mapping[XPath, List[int]]) -> List[str]:
col_relatives = {
next(iter(xpath(row)), EMPTY_ELEMENT): cols
for xpath, cols in col_relatives.items()
}
rel = {
col: element
for element, cols in col_relatives.items()
for col in cols
}
return [
next(filter(
None,
(ensure_str(xpath(rel.get(n, row))) for xpath in xpaths)),
'')
for n, xpaths in enumerate(col_xpaths)
]
def query_row_group(
row: Element,
group: int,
col_xpaths: Sequence[Sequence[XPath]],
col_relatives: Mapping[XPath, List[int]]) -> Tuple[List[str], List[List[str]]]:
grouped, non_grouped = col_xpaths[:group], col_xpaths[group:]
non_grouped_relative = {
tuple(xpath(row)): cols
for xpath, cols in col_relatives.items()
}
rel = {
col: element
for element, cols in non_grouped_relative.items()
for col in cols
}
return (
query_row(row, grouped, col_relatives),
[
[ensure_str(result) for result in xpath(row)]
if n not in rel else
[ensure_str(xpath(rel_row)) for rel_row in rel[n]]
for n, xpaths in enumerate(non_grouped, group)
for xpath in xpaths
]
)
def replace_multiple(s: str, replaces: Sequence[Tuple[str, str]]) -> str:
for from_, to in replaces:
s = s.replace(from_, to)
return s
def escape_tex(s: str) -> str:
return replace_multiple(s, TEX_ESCAPES)
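# e.g. escape_tex('50% & a_b') == r'50\% \& a\_b'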
def format_item(item: str, fmt: str=None, escape: bool=True) -> str:
if escape:
item = escape_tex(item)
if fmt is not None:
item = fmt % item
return item
def format_row(
cols: Sequence[str],
row_style: str='%s',
col_defaults: Mapping[int, str]=None,
col_formats: Mapping[int, str]=None,
col_escapes: Mapping[int, bool]=None,
col_group: int=None,
skip_cols: AbstractSet[int]=set(),) -> str:
col_defaults = col_defaults or {}
col_formats = col_formats or {}
col_escapes = col_escapes or {}
return ' & '.join(
format_item(
col or col_defaults.get(n, ''),
col_formats.get(n),
col_escapes.get(n, True))
for n, col in enumerate(cols)
if n not in skip_cols
) + r' \\'
def format_row_group(
group_cols: List[str],
non_group_cols: List[List[str]],
col_defaults: Mapping[int, str]=None,
col_formats: Mapping[int, str]=None,
col_escapes: Mapping[int, bool]=None,
is_last: bool=False,
skip_cols: AbstractSet[int]=set(),
**kwargs) -> Iterator[str]:
assert len(group_cols) > 0
assert len(non_group_cols) > 0
n_rows = max(map(len, non_group_cols))
# print((group_cols, non_group_cols), file=sys.stderr)
yield r'\multirow[t]{%d}{*}{%s} & %s' % (
n_rows,
format_item(
group_cols[0] or col_defaults.get(0, ''),
col_formats.get(0),
col_escapes.get(0, True)),
format_row(
[''] + group_cols[1:] +
[col_rows[0] if col_rows else '' for col_rows in non_group_cols],
col_defaults=col_defaults,
col_formats=col_formats,
col_escapes=col_escapes,
skip_cols=skip_cols | {0},
**kwargs))
for extra_row in itertools.islice(itertools.zip_longest(
*non_group_cols, fillvalue=''), 1, None):
yield format_row([''] * len(group_cols) + list(extra_row), **kwargs)
if not is_last:
yield '\\midrule\n'
def enum_rows(
in_file: TextIO,
row_xpath: str,
col_xpaths: Sequence[Union[str, Sequence[str]]],
row_style: str='%s',
col_group: int=0,
sort_by: Tuple[int, Callable[[str], Any]]=None,
skip_cols: AbstractSet[int]=set(),
col_relatives: Mapping[str, List[int]]=None,
col_defaults: Mapping[int, str]=None,
col_formats: Mapping[int, str]=None,
col_escapes: Mapping[int, bool]=None) -> Iterator[str]:
col_defaults = col_defaults or {}
col_formats = col_formats or {}
col_escapes = col_escapes or {}
col_relatives = col_relatives or {}
row_xpath = XPath(row_xpath)
col_xpaths = [
[XPath(i)] if isinstance(i, str) else [XPath(j) for j in i]
for i in col_xpaths]
col_relatives = {XPath(k): v for k, v in col_relatives.items()}
tree = etree.parse(in_file)
if col_group > 0:
rows = row_xpath(tree)
for n, row_element in enumerate(rows):
group_cols, non_group_cols = query_row_group(
row_element, col_group, col_xpaths, col_relatives)
yield from format_row_group(
group_cols=group_cols,
non_group_cols=non_group_cols,
row_style=row_style,
col_formats=col_formats,
col_escapes=col_escapes,
col_defaults=col_defaults,
skip_cols=skip_cols,
is_last=n == len(rows) - 1
)
return
row_cols = [
query_row(row_element, col_xpaths, col_relatives)
for row_element in row_xpath(tree)]
if sort_by is not None:
col_sort, translate = sort_by
if translate is None:
translate = SORTING_TRANSFORMS['ident']
reverse = col_sort < 0
# A negative index only encodes descending order; sort on the absolute column.
col_sort = abs(col_sort)
row_cols.sort(
key=lambda cols: translate(ensure_str(cols[col_sort])),
reverse=reverse)
for cols in row_cols:
yield format_row(
cols=cols,
row_style=row_style,
col_formats=col_formats,
col_escapes=col_escapes,
col_defaults=col_defaults,
skip_cols=skip_cols
)
def output_xml(
in_filename: str,
col_xpaths: Sequence[Union[str, Sequence[str]]],
out_file: io.TextIOBase=sys.stdout,
col_names: Sequence[str]=None,
row_aligns: Sequence[str]=None,
print_environment: bool=False,
skip_cols: AbstractSet[int]=set(),
environment_name: str='tabular',
**kwargs) -> None:
if col_xpaths is None:
col_xpaths = ['text()']
if print_environment:
if row_aligns is None:
row_aligns = 'l' * (len(col_xpaths) - len(skip_cols))
print(r'\begin{%s}{%s}\toprule' % (environment_name, row_aligns), file=out_file)
if col_names is not None:
print(
' & '.join(
r'\textbf{%s}' % escape_tex(name)
for n, name in enumerate(col_names)
if n not in skip_cols
) + r'\\\midrule',
file=out_file)
with open(in_filename) as f:
rows = enum_rows(
f,
col_xpaths=col_xpaths,
skip_cols=skip_cols,
**kwargs)
for row in rows:
print(row, file=out_file)
if print_environment:
print('\\bottomrule\n\\end{%s}' % environment_name, file=out_file)
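# Hedged usage sketch (the file name and XPaths are invented for
# illustration): for a document of <host><name>..</name><port>..</port></host>
# elements,
#
#   output_xml('hosts.xml',
#              col_xpaths=['name/text()', 'port/text()'],
#              row_xpath='//host',
#              col_names=['Host', 'Port'],
#              print_environment=True)
#
# prints a booktabs-style `tabular` with one escaped row per matched <host>.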
def parse_formats(formats: List[str]) -> \
Tuple[Mapping[int, str], Mapping[int, bool]]:
col_formats = {}
col_escapes = {}
expr = re.compile(r'^(?:(\d+(?:,\d+)*):)?(\!?)(.*)$')
for fmt in formats:
m = expr.match(fmt)
if m is None:
raise ValueError('Format %s is invalid' % fmt)
# Keep the matched format string in its own name so the compiled regex
# `expr` is not clobbered for later loop iterations.
cols, no_escape, fmt_str = m.groups()
if fmt_str in KNOWN_FORMATS:
    fmt_str, escape = KNOWN_FORMATS[fmt_str]
else:
    escape = not no_escape
cols = list(map(int, filter(None, (cols or '').split(','))))
if not cols:
    col_formats = AnyMap(fmt_str)
    col_escapes = AnyMap(escape)
    break
for col in cols:
    if col in col_formats:
        raise ValueError('Column %d format set more than once' % col)
    col_formats[col] = fmt_str
    col_escapes[col] = escape
return col_formats, col_escapes
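# Example of the column-format mini-language accepted above:
#
#   parse_formats(['0,1:mono', r'3:!\textbf{%s}'])
#   -> ({0: r'\texttt{%s}', 1: r'\texttt{%s}', 3: r'\textbf{%s}'},
#       {0: True, 1: True, 3: False})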
def parse_defaults(defaults: Mapping[str, str]) -> Mapping[int, str]:
col_defaults = {}
expr = re.compile(r'^(\d+(?:,\d+)*)$')
for cols, text in defaults:
m = expr.match(cols)
if m is None:
raise ValueError('Column expression %s invalid' % cols)
cols = [int(i) for i in m.group().split(',')]
for col in cols:
if col in col_defaults:
raise ValueError('Column %d default set more than once' % col)
col_defaults[col] = text
return col_defaults
def parse_relatives(relatives: List[str]) -> Mapping[str, List[int]]:
if relatives is None:
return {}
col_relative = defaultdict(list)
expr = re.compile(r'^(\d+(?:,\d+)*):(.+)$')
for text in relatives:
m = expr.match(text)
if m is None:
raise ValueError('Column expression %s invalid' % text)
cols, xpath = m.groups()
cols = [int(i) for i in cols.split(',')]
col_relative[xpath].extend(cols)
return dict(col_relative)
def merge_config(current, other):
current = current.copy()
for key, val in other.items():
if current.get(key) is None:
current[key] = val
continue
cval = current[key]
logger.debug('Merging key %s', key)
if isinstance(val, dict):
d = cval.copy()
d.update(val)
current[key] = d
elif isinstance(val, list):
current[key] = cval + val
elif isinstance(val, set):
current[key] = cval | val
elif isinstance(val, (str, int)):
logger.warning('Overwriting key %s' % key)
current[key] = val
else:
current[key] = val
return current
def get_config(args):
col_formats, col_escapes = parse_formats(args.formats)
col_defaults = parse_defaults(args.defaults)
col_relatives = parse_relatives(args.relatives)
kwargs = {
'in_filename': args.file,
'row_xpath': args.rows,
'col_xpaths': args.cols,
'col_formats': col_formats,
'col_escapes': col_escapes,
'col_defaults': col_defaults,
'skip_cols': args.skip_cols,
'row_style': args.row_style,
'col_names': args.names,
'col_relatives': col_relatives,
'sort_by': args.sort_by,
'row_aligns': args.align,
'print_environment': args.print_environment,
'col_group': args.group,
'environment_name': args.environment_name,
}
if args.rows.startswith('auto:'):
del kwargs['row_xpath']
config_file = os.path.join(
os.path.split(os.path.realpath(__file__))[0],
'auto', args.rows[5:]) + '.py'
with open(config_file) as f:
code_str = f.read()
config = {}
try:
config = eval(compile(code_str, config_file, 'eval'))
except SyntaxError:
l = {}
exec(compile(code_str, config_file, 'exec'), None, l)
if 'config' not in l:
raise NameError('config not defined')
config = l['config']
kwargs = merge_config(kwargs, config)
n_cols = len(kwargs['col_xpaths'] or [])
invalid_col = next((
col
for groups in (col_formats, col_escapes, col_defaults)
for col in groups
if col | |
len(self._group[name]) > 0:
if empty_only:
raise ValueError(f"Cannot remove non-empty group '{name}'.")
else:
print("Warning: Group to remove is not empty.")
self._group.pop(name)
def add_group_member(self, group, member, **kws):
"""Add a *member* to *group*, if *group* is new, add and update
*member* group only when *new* if True.
Parameters
----------
group : str
Group name.
member :
CaElement.
Keyword Arguments
-----------------
new : True or False
If *group* does not yet exist, create it and add *member* when *new* is True; otherwise a ValueError is raised. Default is True.
"""
new = kws.get('new', True)
elem = self._find_exact_element(member)
if elem is None:
raise ValueError(f"Invalid element '{member}'.")
if group in self._group:
if elem in self._group[group]:
msg = "'{0}' is already in group: '{1}'.".format(
elem.name, group)
print("Warning: {0}".format(msg))
_LOGGER.warning(msg)
return
else:
elem.group.add(group)
_inplace_order_insert(elem, self._group[group])
msg = "Add '{0}' into group '{1}'.".format(
elem.name, group)
_LOGGER.info(msg)
elif new:
self._group[group] = [elem]
elem.group.add(group)
msg = "Add '{0}' into new group '{1}'.".format(
elem.name, group)
_LOGGER.info(msg)
else:
raise ValueError(
"Group {} does not exist, use 'new=True' to add it.".format(
group))
def has_group(self, name):
"""Check if group exists or not.
Parameters
----------
name : str
Group name.
Returns
-------
ret : True or False
True if has group *name* or False.
"""
return name in self._group
def remove_group_member(self, group, member):
"""Remove a *member* from *group*.
Parameters
----------
group : str
Group name.
member :
CaElement.
"""
if group not in self._group:
raise ValueError(
"Remove error: group '{}' does not exist.".format(group))
if member in self._group[group]:
self._group[group].remove(member)
else:
raise ValueError(
"Remove error: '{}' not in group '{}'.".format(
member, group))
def get_groups(self, name=None, element=None, **kws):
"""Get groups filtered by *name*, if *element* is given, a list of
groups that *element* belongs to would return.
Parameters
----------
name : str
Group name string, could be Unix shell style pattern.
element : str
Element name.
Keyword Arguments
-----------------
empty : True or False
If *empty* is True, the names of empty groups are included as well;
True by default.
Returns
-------
ret : list
List of group names.
"""
if element is None:
if kws.get('empty', True):
g = [k for k, v in self._group.items() if fnmatch(k, name)]
else:
g = [k for k, v in self._group.items() if fnmatch(k, name)
and v != []]
return g
else:
return [k for k, v in self._group.items()
if fnmatch(k, name) and element in [el.name for el in v]]
def get_group_members(self, group, **kws):
"""Return element members by applying proper filtering operation on
each group from *group*, filtering operation could be defined by
keyword argument *op*.
Parameters
----------
group: str or list
Group name string or list[str], could be Unix shell style pattern.
Keyword Arguments
-----------------
op : str
Valid options: ``and``, ``or``.
Returns
-------
ret : list
List of elements.
"""
op = kws.get('op', 'and')
if isinstance(group, str):
group = group,
group_list = flatten(
[[g for g in self._group if fnmatch(g, gi)] for gi in group]
)
elem_dict = {g: self._group[g] for g in group_list}
if op == 'and':
return get_intersection(**elem_dict)
else: # op = 'or'
return list(set(flatten(elem_dict.values())))
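# Usage sketch (the `lat` handle and group patterns are hypothetical):
#
#   lat.get_group_members('QUAD*', op='and')           # intersection of all matched groups
#   lat.get_group_members(['QUAD*', 'BPM*'], op='or')  # union over both patterns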
@property
def orm(self):
"""Array: Orbit response matrix.
See Also
--------
:func:`~phantasy.library.physics.orm.get_orm`
Calculator orbit response matrix.
"""
return self._orm
@orm.setter
def orm(self, m):
self._orm = m
def correct_orbit(self, correctors, bpms, **kws):
"""Correct orbit by using ORM.
Parameters
----------
correctors : list
List of corrector elements.
bpms : list
List of BPM elements.
Keyword Arguments
-----------------
cor_field : str
Field name for correctors, ``'ANG'`` by default.
orb_field : tuple[str]
Field names for monitors to retrieve orbit data, ``('X', 'Y')`` for
*x* and *y* directions by default.
xoy : str
'x' ('y') to monitor only the 'x' ('y') direction, 'xy' for both (default).
damping_factor : float
Factor to correct orbit, default is 0.05, which would decrease beam
orbit (BPM readings) by 5% for every correction.
iteration : int
Iteration numbers of correction, default is 1.
wait : float
Wait time after set value, in *sec*, 1.0 by default.
echo : bool
Print out message or not, default is True.
msg_queue : Queue
A queue that keeps log messages.
mode : str
If running under 'interactive' mode or not.
cor_min : float
Lower limit for corrector settings.
cor_max : float
Upper limit for corrector settings.
Returns
-------
r : bool
True if no errors happen.
See Also
--------
get_settings_from_orm : calculate COR settings from ORM for orbit
correction.
apply_settings_from_orm : apply COR settings from ORM to do orbit
correction.
"""
itern = kws.get('iteration', 1)
cor_field = kws.get('cor_field', 'ANG')
damp_fac = kws.get('damping_factor', 0.05)
wait = kws.get('wait', 1.0)
echo = kws.get('echo', True)
q_msg = kws.get('msg_queue', None)
mode = kws.get('mode', 'interactive')
upper_limit_cor = kws.get('cor_max', 5.0) # A
lower_limit_cor = kws.get('cor_min', -5.0) # A
if self._orm is None:
_LOGGER.error("correct_orbit: ORM is not available, set ORM first.")
raise RuntimeError("INVALID ORM data.")
m = self._orm
m_inv = inverse_matrix(m)
n_cor = len(correctors)
for i in range(1, itern + 1):
bpm_readings = get_orbit(bpms, **kws)
delt_cor = np.dot(m_inv, -bpm_readings * damp_fac)
for ic, (e, v) in enumerate(zip(correctors, delt_cor)):
v0 = getattr(e, cor_field)
v_to_set = limit_input(v0 + v,
lower_limit_cor, upper_limit_cor)
setattr(e, cor_field, v_to_set)
time.sleep(wait)
msg = "[{0}] #[{1}]/[{2}] Set [{3:02d}] {4} [{5}]: {6:>10.6g}.".format(
epoch2human(time.time(), fmt=TS_FMT),
i, itern, ic + 1, e.name, cor_field, v_to_set)
if q_msg is not None:
q_msg.put((((ic + (i - 1) * n_cor))* 100.0 / n_cor / itern, msg))
if echo:
print(msg)
if i+1 > itern:
break
if mode != 'interactive':
next_iter = 'Y'
else:
next_iter = input(
"Continue correction iteration: {0}/{1}? ([Y]/N)".format(i + 1,
itern)
)
if next_iter.upper() in ['Y', '']:
continue
else:
break
return True
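# Hedged usage sketch (the `lattice`, `cors` and `bpms` names are assumptions
# for illustration, not taken from this module):
#
#   lattice.orm = measured_orm          # response matrix, see the `orm` property
#   lattice.correct_orbit(cors, bpms,
#                         cor_field='ANG',
#                         damping_factor=0.05,
#                         iteration=3)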
def apply_settings_from_orm(self, settings, **kws):
"""Apply correctors settings calculated from OMR to do orbit correction.
Parameters
----------
settings : list
List of tuple of (CaElement, field, setting, setting_limited).
Keyword Arguments
-----------------
iteration : int
Iteration numbers of correction, default is 1.
wait : float
Wait time after set value, in *sec*, 1.0 by default.
echo : bool
Print out message or not, default is True.
msg_queue : Queue
A queue that keeps log messages.
mode : str
If running under 'interactive' mode or not.
cor_min : float
Lower limit for corrector settings.
cor_max : float
Upper limit for corrector settings.
See Also
--------
get_settings_from_orm : calculate COR settings from ORM for orbit
correction.
correct_orbit : calculate and apply COR settings from ORM to do orbit
correction.
"""
itern = kws.get('iteration', 1)
wait = kws.get('wait', 1.0)
echo = kws.get('echo', True)
q_msg = kws.get('msg_queue', None)
mode = kws.get('mode', 'interactive')
upper_limit_cor = kws.get('cor_max', 5.0)  # A; unused here, limits are pre-applied in `settings`
lower_limit_cor = kws.get('cor_min', -5.0)  # A; unused here, limits are pre-applied in `settings`
n_cor = len(settings)
for i in range(1, itern + 1):
for ic, (cor, cor_field, v, v_limited) in enumerate(settings):
#v_to_set = limit_input(v, lower_limit_cor, upper_limit_cor)
v_to_set = v_limited
setattr(cor, cor_field, v_to_set)
time.sleep(wait)
msg = "[{0}] #[{1}]/[{2}] Set [{3:02d}] {4} [{5}]: {6:>10.6g}.".format(
epoch2human(time.time(), fmt=TS_FMT),
i, itern, ic + 1, cor.name, cor_field, v_to_set)
if q_msg is not None:
q_msg.put((((ic + (i - 1) * n_cor))* 100.0 / n_cor / itern, msg))
if echo:
print(msg)
if i+1 > itern:
break
if mode != 'interactive':
next_iter = 'Y'
else:
next_iter = input(
"Continue correction iteration: {0}/{1}? ([Y]/N)".format(i + 1,
itern)
)
if next_iter.upper() in ['Y', '']:
continue
else:
break
return True
def apply_setting(self, setting, **kws):
"""Apply setting for one corrector.
Parameters
----------
setting : tuple
Tuple of corrector setting:
(CaElement, field, setpoint, setpoint_limited).
Keyword Arguments
-----------------
wait : float
Wait time after set value, in *sec*, 1.0 by default.
msg_queue : Queue
A queue that keeps log messages.
idx : int
Index of selected corrector of all selected ones.
ncor : int
Total number of selected correctors.
ndigits : int
Number of effective digits to keep for a float number.
"""
wait = kws.get('wait', 1.0)
idx = kws.get('idx', 0.0) # index of correctors
n = kws.get('ncor', 1) # total number of correctors
q_msg = kws.get('msg_queue', None)
n_trun = kws.get('ndigits', 6)
cor, cor_field, v, v_limited = setting
v_truncated = truncate_number(v_limited, n_trun)
setattr(cor, cor_field, v_truncated)
time.sleep(wait)
msg = "[{0}] Set [{1:02d}] {2} | |
for x, y in walk_coords]
polygon = Polygon(*points, color = WHITE)
if self.color_foreground_not_background:
polygon.stroke_width = border_stroke_width
polygon.color_using_background_image(self.background_image_file)
total_run_time = len(points) * self.step_run_time
polygon_anim = ShowCreation(polygon, run_time = total_run_time, rate_func=linear)
walker_anim = empty_animation
start_wind = 0
for i in range(len(walk_coords)):
start_coords = walk_coords[i]
end_coords = walk_coords[(i + 1) % len(walk_coords)]
# We need to do this roundabout default argument thing to get the closure we want,
# so the next iteration changing start_coords, end_coords doesn't change this closure
val_alpha_func = lambda a, start_coords = start_coords, end_coords = end_coords : self.func(interpolate(start_coords, end_coords, a))
if self.display_wind:
clockwise_val_func = lambda p : -point_to_rev(self.func(p))
alpha_winder = make_alpha_winder(clockwise_val_func, start_coords, end_coords, self.num_checkpoints)
number_update_func = lambda alpha, alpha_winder = alpha_winder, start_wind = start_wind: alpha_winder(alpha) - alpha_winder(0) + start_wind
start_wind = 0 if i + 1 in self.wind_reset_indices else number_update_func(1)
elif self.display_size:
# We need to do this roundabout default argument thing to get the closure we want,
# so the next iteration changing val_alpha_func doesn't change this closure
number_update_func = lambda a, val_alpha_func = val_alpha_func : point_to_rescaled_size(val_alpha_func(a)) # We only use this for diagnostics
else:
number_update_func = None
new_anim = LinearWalker(
start_coords = start_coords,
end_coords = end_coords,
coords_to_point = num_plane.coords_to_point,
val_func = self.func,
remover = (i < len(walk_coords) - 1),
show_arrows = not self.show_output,
scale_arrows = self.scale_arrows,
number_update_func = number_update_func,
run_time = self.step_run_time,
walker_stroke_color = WALKER_LIGHT_COLOR if self.color_foreground_not_background else BLACK,
num_decimal_places = self.num_decimal_places,
include_background_rectangle = self.include_background_rectangle,
)
if self.display_odometer:
# Discard above animation and show an odometer instead
# We need to do this roundabout default argument thing to get the closure we want,
# so the next iteration changing val_alpha_func doesn't change this closure
rev_func = lambda a, val_alpha_func = val_alpha_func : point_to_rev(val_alpha_func(a))
base_arrow = Arrow(ORIGIN, RIGHT, buff = 0)
new_anim = FuncRotater(base_arrow,
rev_func = rev_func,
run_time = self.step_run_time,
rate_func=linear,
remover = i < len(walk_coords) - 1,
)
walker_anim = Succession(walker_anim, new_anim)
# TODO: Allow smooth paths instead of breaking them up into lines, and
# use point_from_proportion to get points along the way
if self.display_odometer:
color_wheel = Circle(radius = ODOMETER_RADIUS)
color_wheel.stroke_width = ODOMETER_STROKE_WIDTH
color_wheel.color_using_background_image(self.short_path_to_long_path("pure_color_map.png")) # Manually inserted here; this is unclean
self.add(color_wheel)
self.play(walker_anim)
else:
if self.draw_lines:
self.play(polygon_anim, walker_anim)
else:
# (Note: It turns out play is unhappy being given empty_animation, which was
# the previous approach to this toggle; should fix that.)
self.play(walker_anim)
self.wait()
class PiWalkerRect(PiWalker):
CONFIG = {
"start_x" : -1,
"start_y" : 1,
"walk_width" : 2,
"walk_height" : 2,
"func" : plane_func_from_complex_func(lambda c: c**2),
"double_up" : False,
# New default for the scenes using this:
"display_wind" : True
}
def setup(self):
TL = np.array((self.start_x, self.start_y))
TR = TL + (self.walk_width, 0)
BR = TR + (0, -self.walk_height)
BL = BR + (-self.walk_width, 0)
self.walk_coords = [TL, TR, BR, BL]
if self.double_up:
self.walk_coords = self.walk_coords + self.walk_coords
PiWalker.setup(self)
class PiWalkerCircle(PiWalker):
CONFIG = {
"radius" : 1,
"num_steps" : 100,
"step_run_time" : 0.01
}
def setup(self):
r = self.radius
N = self.num_steps
self.walk_coords = [r * np.array((np.cos(i * TAU/N), np.sin(i * TAU/N))) for i in range(N)]
PiWalker.setup(self)
def split_interval(interval):
    (a, b) = interval
mid = (a + b)/2.0
return ((a, mid), (mid, b))
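# e.g. split_interval((0, 1)) == ((0, 0.5), (0.5, 1))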
# I am surely reinventing some wheel here, but what's done is done...
class RectangleData():
def __init__(self, x_interval, y_interval):
self.rect = (x_interval, y_interval)
def get_top_left(self):
return np.array((self.rect[0][0], self.rect[1][1]))
def get_top_right(self):
return np.array((self.rect[0][1], self.rect[1][1]))
def get_bottom_right(self):
return np.array((self.rect[0][1], self.rect[1][0]))
def get_bottom_left(self):
return np.array((self.rect[0][0], self.rect[1][0]))
def get_top(self):
return (self.get_top_left(), self.get_top_right())
def get_right(self):
return (self.get_top_right(), self.get_bottom_right())
def get_bottom(self):
return (self.get_bottom_right(), self.get_bottom_left())
def get_left(self):
return (self.get_bottom_left(), self.get_top_left())
def get_center(self):
return interpolate(self.get_top_left(), self.get_bottom_right(), 0.5)
def get_width(self):
return self.rect[0][1] - self.rect[0][0]
def get_height(self):
return self.rect[1][1] - self.rect[1][0]
def splits_on_dim(self, dim):
x_interval = self.rect[0]
y_interval = self.rect[1]
# TODO: Can refactor the following; will do later
if dim == 0:
return_data = [RectangleData(new_interval, y_interval) for new_interval in split_interval(x_interval)]
elif dim == 1:
return_data = [RectangleData(x_interval, new_interval) for new_interval in split_interval(y_interval)[::-1]]
else:
print("RectangleData.splits_on_dim passed illegitimate dimension!")
return tuple(return_data)
def split_line_on_dim(self, dim):
x_interval = self.rect[0]
y_interval = self.rect[1]
if dim == 0:
sides = (self.get_top(), self.get_bottom())
elif dim == 1:
sides = (self.get_left(), self.get_right())
else:
print("RectangleData.split_line_on_dim passed illegitimate dimension!")
return tuple([mid(x, y) for (x, y) in sides])
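# e.g. RectangleData((0, 2), (0, 2)).splits_on_dim(0) returns the left and
# right halves: rectangles ((0, 1.0), (0, 2)) and ((1.0, 2), (0, 2)).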
class EquationSolver2dNode(object):
def __init__(self, first_anim, children = []):
self.first_anim = first_anim
self.children = children
def depth(self):
if len(self.children) == 0:
return 0
return 1 + max([n.depth() for n in self.children])
def nodes_at_depth(self, n):
if n == 0:
return [self]
# Not the efficient way to flatten lists, because Python + is linear in list size,
# but we have at most two children, so no big deal here
return sum([c.nodes_at_depth(n - 1) for c in self.children], [])
# This is definitely NOT the efficient way to do BFS, but I'm just trying to write something
# quick without thinking that gets the job done on small instances for now
def hacky_bfs(self):
depth = self.depth()
# Not the efficient way to flatten lists, because Python + is linear in list size,
# but this IS hacky_bfs...
return sum([self.nodes_at_depth(i) for i in range(depth + 1)], [])
def display_in_series(self):
return Succession(self.first_anim, *[n.display_in_series() for n in self.children])
def display_in_parallel(self):
return Succession(self.first_anim, AnimationGroup(*[n.display_in_parallel() for n in self.children]))
def display_in_bfs(self):
bfs_nodes = self.hacky_bfs()
return Succession(*[n.first_anim for n in bfs_nodes])
def play_in_bfs(self, scene, border_anim):
bfs_nodes = self.hacky_bfs()
print("Number of nodes: ", len(bfs_nodes))
if len(bfs_nodes) < 1:
print("Less than 1 node! Aborting!")
return
scene.play(bfs_nodes[0].first_anim, border_anim)
for node in bfs_nodes[1:]:
scene.play(node.first_anim)
class EquationSolver2d(ColorMappedObjectsScene):
CONFIG = {
"camera_config" : {"use_z_coordinate_for_display_order": True},
"initial_lower_x" : -5,
"initial_upper_x" : 5,
"initial_lower_y" : -3,
"initial_upper_y" : 3,
"num_iterations" : 0,
"num_checkpoints" : 10,
# Should really merge this into one enum-style variable
"display_in_parallel" : False,
"display_in_bfs" : False,
"use_fancy_lines" : True,
"line_color" : WHITE, # Only used for non-fancy lines
# TODO: Consider adding a "find_all_roots" flag, which could be turned off
# to only explore one of the two candidate subrectangles when both are viable
# Walker settings
"show_arrows" : True,
"scale_arrows" : False,
# Special case settings
# These are used to hack UhOhScene, where we display different colors than
# are actually, secretly, guiding the evolution of the EquationSolver2d
#
# replacement_background_image_file has to be manually configured
"show_winding_numbers" : True,
# Used for UhOhScene;
"manual_wind_override" : None,
"show_cursor" : True,
"linger_parameter" : 0.5,
"use_separate_plays" : False,
"use_cheap_winding_numbers" : False, # To use this, make num_checkpoints large
}
def construct(self):
if self.num_iterations == 0:
print("You forgot to set num_iterations (maybe you meant to subclass something other than EquationSolver2d directly?)")
return
ColorMappedObjectsScene.construct(self)
num_plane = self.num_plane
clockwise_val_func = lambda p : -point_to_rev(self.func(p))
base_line = Line(UP, RIGHT, stroke_width = border_stroke_width, color = self.line_color)
if self.use_fancy_lines:
base_line.color_using_background_image(self.background_image_file)
def match_style_with_bg(obj1, obj2):
obj1.match_style(obj2)
bg = obj2.get_background_image_file()
if bg != None:
obj1.color_using_background_image(bg)
run_time_base = 1
run_time_with_lingering = run_time_base + self.linger_parameter
base_rate = lambda t : t
linger_rate = squish_rate_func(lambda t : t, 0,
fdiv(run_time_base, run_time_with_lingering))
cursor_base = TexText("?")
cursor_base.scale(2)
# Helper functions for manual_wind_override
def head(m):
if m == None:
return None
return m[0]
def child(m, i):
if m == None or m == 0:
return None
return m[i + 1]
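# Illustrative shape of the manual_wind_override tree these helpers walk: a nested
# sequence whose first element is the winding number to display for the current
# rectangle and whose remaining elements are overrides for its sub-rectangles.
# Hypothetical example (values not taken from any actual scene):
#   override = (1, (0, None, None), None)
#   head(override)            # -> 1
#   head(child(override, 0))  # -> 0
# A None entry presumably means "no override here; show the computed winding number".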
def Animate2dSolver(cur_depth, rect, dim_to_split,
sides_to_draw = [0, 1, 2, 3],
manual_wind_override = None):
print("Solver at depth: " + str(cur_depth))
if cur_depth >= self.num_iterations:
return EquationSolver2dNode(empty_animation)
def draw_line_return_wind(start, end, start_wind, should_linger = False, draw_line = True):
alpha_winder = make_alpha_winder(clockwise_val_func, start, end, self.num_checkpoints, cheap = self.use_cheap_winding_numbers)
a0 = alpha_winder(0)
rebased_winder = lambda alpha: alpha_winder(alpha) - a0 + start_wind
colored_line = Line(num_plane.coords_to_point(*start) + IN, num_plane.coords_to_point(*end) + IN)
match_style_with_bg(colored_line, base_line)
walker_anim = LinearWalker(
start_coords = start,
end_coords = end,
coords_to_point = num_plane.coords_to_point,
val_func = self.func, # Note: This is the image func, and not logic_func
number_update_func = rebased_winder if self.show_winding_numbers else None,
remover = True,
walker_stroke_color = WALKER_LIGHT_COLOR,
show_arrows = self.show_arrows,
scale_arrows = self.scale_arrows,
)
if should_linger: # Do we need an "and not self.display_in_parallel" here?
run_time = run_time_with_lingering
rate_func = linger_rate
journal = {CoRR},
volume = {abs/1903.04561},
year = {2019},
url = {http://arxiv.org/abs/1903.04561},
archivePrefix = {arXiv},
eprint = {1903.04561},
timestamp = {Sun, 31 Mar 2019 19:01:24 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1903-04561},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
# An experiment we could try is tagging with another public figure tag called PUBLIC_FIGURE_OPINION
# so that we could train a model to distinguish between public figure mentions in opinion domains vs non-opinion domains
dataset = load_dataset("civil_comments")
for d in (dataset['train'],):
for idx, data in enumerate(d):
score = sum([data[feature] for feature in
['toxicity', 'severe_toxicity', 'obscene', 'threat', 'insult', 'identity_attack',
'sexual_explicit']])
l2 = data['text']
l2 = l2.replace("\n", " ").replace(" ", " ").replace(" ", " ")
l2Arr = l2.split()
has_a_name = has_any(first_names, l2Arr)
l2_lower = l2.lower()
if random.choice([0, 1]) and not has_a_name and "mr." not in l2_lower and "ms." not in l2_lower and "mrs." not in l2_lower and "president" not in l2_lower and "governor" not in l2_lower and "mayor" not in l2_lower:
continue
if len(l2Arr) > 10 and len(l2Arr) < 50 and (score <= 0.5 or random.randint(0, 10) == 0):  # having too much toxic content may skew the data
if has_a_name or "mr." in l2_lower or "ms." in l2_lower or "mrs." in l2_lower or "senator" in l2_lower or "president" in l2_lower or "governor" in l2_lower or "mayor" in l2_lower:
o.write(l2 + "\tcivil_comments\n")
os.system("sort --parallel=32 english.tsv -o english.tsv")
with open("english_cleaned.tsv", "w", encoding="utf8") as o:
with open("english.tsv", "rb") as f:
prev = ""
while True:
l = f.readline().decode()
if not l: break
l = l.strip()
l2 = l.replace(":", "").replace("[", "").replace("]", "").replace(".", "").replace("!", "").replace("?", "").replace(",", "").replace("-", "").replace(";", "").replace(" ", "").lower()
prev2 = prev.replace(":", "").replace("[", "").replace("]", "").replace(".", "").replace("!", "").replace("?", "").replace(",", "").replace("-", "").replace(";", "").replace(" ", "").lower()
if prev != "" and (l2 == prev2 or (len(prev) > 10 and len(l) > 10 and prev2[:10] == l2[:10])):
if len(l) > len(prev):
prev = l
continue
else:
if prev:
if prev[0] < 'וח':
o.write(prev.lstrip(':;.+- ') + "\n")
prev = l
if prev:
if prev[0] < 'וח':
o.write(prev.lstrip(':;.+- ') + "\n")
os.system("sort --parallel=32 english_cleaned.tsv -o english_cleaned.tsv")
os.system(f"cp english_cleaned.tsv {share_dir}/english_cleaned.tsv")
def check_good_sentence(s, en_lang_cutoff=0.1, junk_ratio=0.5, stopword_check=True):
# basic dejunk
s = s.lower().strip()
if not s: return False
jr = len([s2 for s2 in s if s2 in junk_dict]) / len(s)
if jr >= junk_ratio:
return False
sArr = [s2.strip("' 0123456789¯_§½¼¾×|†—~\"—±′–'°−{}[]·-\'?,./<>!@#^&*()+-‑=:;`→¶'") for s2 in s.lower().split()]
if len(sArr) == 0:
return False
# stopword check
if stopword_check and len([s2 for s2 in sArr if s2 in stopwords_en]) / len(sArr) < en_lang_cutoff:
return False
else:
# langid check
try:
lang = langid.classify(s)[0]
except:
lang = ""
return lang == "en"
def create_oscar_subset_for_ner():
with open("pii_oscar.txt", "w", encoding="utf8") as o:
with open("oscar_sample.txt", "rb") as f:
while True:
sent = f.readline().decode()
if not sent: break
sent = sent.strip()
for sent2 in sent.split("<|endoftext|>"):
sent2 = sent2.strip()
sentArr = sent2.split()
if len(sentArr) > 150:
# print ("truncating sent", sent)
sent2 = " ".join(sentArr[:150])
if "Alzheimer's" in sent2 or "Alzheimer" in sent2 or 'heart disease' in sent2 or ' AIDS ' in sent2 or ' HIV ' in sent2 or ' was born ' in sent2 or 'Social Secu' in sent2 or 'socialist' in sent2 or 'republican' in sent2 or 'democrat' in sent2 or 'lower class' in sent2 or ' union ' in sent2 or 'upper class' in sent2 or 'middle class' in sent2 or ' cancer ' in sent2:
if 'pussy' not in sent2 and ' cock ' not in sent2:
o.write(sent2 + "\n")
url = _get_oscar_urls("en")[0]
_download_urls([url])
file = url.split("/")[-1]
for sent2 in gzip.open(file):
sent2 = sent2.decode()
sent2 = sent2.strip()
sentArr = sent2.split()
if len(sentArr) > 150:
sent2 = " ".join(sentArr[:150])
# for the bigger set, just look for disease/age terms, since terms like "republican" and "democrat" are over-represented
if "Alzheimer's" in sent2 or "Alzheimer" in sent2 or 'heart disease' in sent2 or ' AIDS ' in sent2 or ' HIV ' in sent2 or ' was born ' in sent2 or ' cancer ' in sent2:
if not check_good_sentence(sent2):
continue
if 'pussy' not in sent2 and ' cock ' not in sent2:
o.write(sent2 + "\n")
os.system("sort --parallel=32 pii_oscar.txt -o pii_oscar.txt")
def do_ner(do_casehold=False):
""" Create English based NER/PII dataset """
faker_target_lang = Faker(faker_map["en"])
faker_target_lang.add_provider(person)
faker_target_lang.add_provider(ssn)
faker_target_lang.add_provider(address)
nlp = spacy.load('en_core_web_lg')
row_id = 0
with open("pii_en.jsonl", "w", encoding="utf8") as o:
with open("pii_oscar.txt", "rb") as f:
prev = ""
for sent2 in tqdm(f):
# sent2 = f.readline().decode().strip()
sent2 = sent2.decode().strip()
if not sent2: break
domain = "oscar"
if sent2 == prev:
continue
if prev:
sent3 = prev
if sent3[0] in "0123456789":
sent3 = sent3.split(" ", 1)[1]
sentArr = sent3.split()
if sentArr[0].endswith(":"):
sentArr = sentArr[1:]  # drop the leading "Label:" token
if len(sentArr) > 100:
sentArr = sentArr[:100]
sent3 = " ".join(sentArr)
if True:
doc = nlp(sent3)
entities = list(doc.ents)
if [entity for entity in entities if entity.label_ == 'PERSON']:
ents = [[entity.text, entity.label_] for entity in entities if
entity.label_ in ('PERSON', 'GPE', 'ORG', 'NORP') and 'http:' not in entity.text]
swap = False
for label, regex in basic_regex:
for x in regex.findall(sent3):
if type(x) != str: continue
ents.append([x, label])
if label in ('GOVT_ID', 'STREET_ADDRESS',):
swap = True
if len(ents) > 1 or 'cancer' in sent3 or 'class' in sent3 or 'union' in sent3 or 'democrat' in sent3 or 'republican' in sent3 or 'socialist' in sent3:
if len(ents) < 5:
if swap or '@' in sent3 or 'Social Sec' in sent3 or 'password' in sent3:
context = {}
ents2 = []
for item in ents:
if item[1] in ('GOVT_ID', 'STREET_ADDRESS', 'PERSON'):
if item[0] in public_figures:
item[1] = 'PUBLIC_FIGURE'
else:
context[item[0]] = context.get(
    item[0],
    faker_target_lang.name() if " " in item[0] and item[1] == 'PERSON' else
    faker_target_lang.first_name() if item[1] == 'PERSON' else
    faker_target_lang.ssn() if item[1] == 'GOVT_ID' else
    faker_target_lang.address() if item[1] == 'STREET_ADDRESS' else
    item[0])
if " " in item[0]:
context[item[0].split()[0]] = context[item[0]].split()[0]
context[item[0].split()[-1]] = context[item[0]].split()[-1]
item[0] = context[item[0]]
ents2.append(item)
else:
ents2 = ents
o.write(json.dumps(
{"text": sent3, "ner": ents2, "domain": domain, "target_lang": "en",
"id": row_id}) + "\n")
row_id += 1
prev = sent2
with open("english_cleaned.tsv", "rb") as f:
# while True:
# l = f.readline().decode()
# if not l: break
for l in tqdm(f):
# sent2 = f.readline().decode().strip()
l = l.decode().strip()
l = l.split("\t")
sent = l[0]
domain = l[-1].strip()
if not check_good_sentence(sent):
continue
if not do_casehold and domain == "casehold": continue
if "Notice No." in sent: continue
if "TO: ALL COMEX" in sent: continue
if "TO: All NYMEX" in sent: continue
if "TO: All New" in sent: continue
if sent[0] in "0123456789":
sent = sent.split(" ", 1)[1]
sentArr = sent.split()
if sentArr[0].endswith(":"):
sentArr = sentArr[1:]  # drop the leading "Label:" token
if len(sentArr) > 100:
sentArr = sentArr[:100]
sent = " ".join(sentArr)
doc = nlp(sent)
entities = list(doc.ents)
if [entity for entity in entities if entity.label_ == 'PERSON']:
ents = [[entity.text, entity.label_] for entity in entities if
entity.label_ in ('PERSON', 'GPE', 'ORG', 'NORP') and 'http:' not in entity.text]
swap = False
for label, regex in basic_regex:
for x in regex.findall(sent):
if type(x) != str: continue
ents.append([x, label])
if label in ('GOVT_ID', 'STREET_ADDRESS',):
swap = True
if len(ents) > 1 and len(ents) < 5:
if swap or random.randint(0, 1) == 0 or '@' in sent or 'Social Sec' in sent or 'password' in sent:
context = {}
ents2 = []
for item in ents:
if item[1] in ('GOVT_ID', 'STREET_ADDRESS', 'PERSON'):
if item[0] in public_figures:
item[1] = 'PUBLIC_FIGURE'
else:
context[item[0]] = context.get(
    item[0],
    faker_target_lang.name() if " " in item[0] and item[1] == 'PERSON' else
    faker_target_lang.first_name() if item[1] == 'PERSON' else
    faker_target_lang.ssn() if item[1] == 'GOVT_ID' else
    faker_target_lang.address() if item[1] == 'STREET_ADDRESS' else
    item[0])
sent = sent.replace(item[0], context[item[0]])
if " " in item[0]:
context[item[0].split()[0]] = context[item[0]].split()[0]
context[item[0].split()[-1]] = context[item[0]].split()[-1]
item[0] = context[item[0]]
ents2.append(item)
else:
ents2 = ents
o.write(json.dumps(
{"text": sent, "ner": ents2, "domain": domain, "target_lang": "en", "id": row_id}) + "\n")
row_id += 1
def pre_translation_steps(target_lang='hi', person_swap=True):
texts
#@+node:ekr.20040306214401: *5* p.Status bits
def isDirty(self): return self.v.isDirty()
def isMarked(self): return self.v.isMarked()
def isOrphan(self): return self.v.isOrphan()
def isSelected(self): return self.v.isSelected()
def isTopBitSet(self): return self.v.isTopBitSet()
def isVisited(self): return self.v.isVisited()
def status(self): return self.v.status()
#@+node:ekr.20040306214240.2: *4* p.children & parents
#@+node:ekr.20040326064330: *5* p.childIndex
# This used to be time-critical code.
def childIndex(self):
p = self
return p._childIndex
#@+node:ekr.20040323160302: *5* p.directParents
def directParents(self):
return self.v.directParents()
#@+node:ekr.20040306214240.3: *5* p.hasChildren & p.numberOfChildren
def hasChildren(self):
p = self
return len(p.v.children) > 0
hasFirstChild = hasChildren
def numberOfChildren(self):
p = self
return len(p.v.children)
#@+node:ekr.20031218072017.915: *4* p.getX & VNode compatibility traversal routines
# These methods are useful abbreviations.
# Warning: they make copies of positions, so they should be used _sparingly_
def getBack(self): return self.copy().moveToBack()
def getFirstChild(self): return self.copy().moveToFirstChild()
def getLastChild(self): return self.copy().moveToLastChild()
def getLastNode(self): return self.copy().moveToLastNode()
# def getLastVisible (self): return self.copy().moveToLastVisible()
def getNext(self): return self.copy().moveToNext()
def getNodeAfterTree(self): return self.copy().moveToNodeAfterTree()
def getNthChild(self, n): return self.copy().moveToNthChild(n)
def getParent(self): return self.copy().moveToParent()
def getThreadBack(self): return self.copy().moveToThreadBack()
def getThreadNext(self): return self.copy().moveToThreadNext()
# New in Leo 4.4.3 b2: add c args.
def getVisBack(self, c): return self.copy().moveToVisBack(c)
def getVisNext(self, c): return self.copy().moveToVisNext(c)
# These are efficient enough now that iterators are the normal way to traverse the tree!
back = getBack
firstChild = getFirstChild
lastChild = getLastChild
lastNode = getLastNode
# lastVisible = getLastVisible # New in 4.2 (was in tk tree code).
next = getNext
nodeAfterTree = getNodeAfterTree
nthChild = getNthChild
parent = getParent
threadBack = getThreadBack
threadNext = getThreadNext
visBack = getVisBack
visNext = getVisNext
# New in Leo 4.4.3:
hasVisBack = visBack
hasVisNext = visNext
#@+node:tbrown.20111010104549.26758: *4* p.get_UNL
def get_UNL(self, with_file=True, with_proto=False, with_index=True, with_count=False):
"""
with_file=True - include path to Leo file
with_proto=False - include 'file://'
with_index - include ',x' at end where x is child index in parent
with_count - include ',x,y' at end where y is the zero-based count of preceding siblings with the same headline
"""
aList = []
for i in self.self_and_parents():
if with_index or with_count:
i = i.copy()
count = 0
ind = 0
p = i.copy()
while p.hasBack():
ind = ind + 1
p = p.back().copy()
if i.h == p.h:
count = count + 1
aList.append(i.h.replace('-->', '--%3E') + ":" + str(ind))
# g.recursiveUNLFind and sf.copy_to_my_settings undo this replacement.
if count or with_count:
aList[-1] = aList[-1] + "," + str(count)
else:
aList.append(i.h.replace('-->', '--%3E'))
# g.recursiveUNLFind and sf.copy_to_my_settings undo this replacement.
UNL = '-->'.join(reversed(aList))
if with_proto:
# return ("file://%s#%s" % (self.v.context.fileName(), UNL)).replace(' ', '%20')
s = "unl:" + "//%s#%s" % (self.v.context.fileName(), UNL)
return s.replace(' ', '%20')
elif with_file:
return ("%s#%s" % (self.v.context.fileName(), UNL))
else:
return UNL
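# Illustrative example (hypothetical outline, not a real file): for a node "grandchild"
# that is the first child of "child", which is the second child of top-level node
# "parent", get_UNL(with_file=False) returns a string like
#   "parent:0-->child:1-->grandchild:0"
# and with_file=True prefixes "<path-to-leo-file>#" to that string.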
#@+node:ekr.20080416161551.192: *4* p.hasBack/Next/Parent/ThreadBack
def hasBack(self):
p = self
return p.v and p._childIndex > 0
def hasNext(self):
p = self
try:
parent_v = p._parentVnode()
# Returns None if p.v is None.
return p.v and parent_v and p._childIndex + 1 < len(parent_v.children)
except Exception:
g.trace('*** Unexpected exception')
g.es_exception()
return None
def hasParent(self):
p = self
return p.v and p.stack
def hasThreadBack(self):
p = self
return p.hasParent() or p.hasBack()
# Much cheaper than computing the actual value.
#@+node:ekr.20080416161551.193: *5* hasThreadNext (the only complex hasX method)
def hasThreadNext(self):
p = self
if not p.v: return False
if p.hasChildren() or p.hasNext(): return True
n = len(p.stack) - 1
while n >= 0:
v, childIndex = p.stack[n]
# See how many children v's parent has.
if n == 0:
parent_v = v.context.hiddenRootNode
else:
parent_v, junk = p.stack[n - 1]
if len(parent_v.children) > childIndex + 1:
# v has a next sibling.
return True
n -= 1
return False
#@+node:ekr.20060920203352: *4* p.findRootPosition
def findRootPosition(self):
# 2011/02/25: always use c.rootPosition
p = self
c = p.v.context
return c.rootPosition()
#@+node:ekr.20080416161551.194: *4* p.isAncestorOf
def isAncestorOf(self, p2):
'''Return True if p is one of the direct ancestors of p2.'''
p = self
c = p.v.context
if not c.positionExists(p2):
return False
for z in p2.stack:
# 2013/12/25: bug fix: test childIndices.
# This is required for the new per-position expansion scheme.
parent_v, parent_childIndex = z
if parent_v == p.v and parent_childIndex == p._childIndex:
return True
return False
#@+node:ekr.20040306215056: *4* p.isCloned
def isCloned(self):
p = self
return p.v.isCloned()
#@+node:ekr.20040307104131.2: *4* p.isRoot
def isRoot(self):
p = self
return not p.hasParent() and not p.hasBack()
#@+node:ekr.20080416161551.196: *4* p.isVisible (slow)
def isVisible(self, c):
'''Return True if p is visible in c's outline.'''
p = self
def visible(p, root=None):
for parent in p.parents():
if parent and parent == root:
# Fix bug: https://github.com/leo-editor/leo-editor/issues/12
return True
if not c.shouldBeExpanded(parent):
return False
return True
if c.hoistStack:
root = c.hoistStack[-1].p
if p == root:
# Fix bug: https://github.com/leo-editor/leo-editor/issues/12
return True
else:
return root.isAncestorOf(p) and visible(p, root=root)
else:
for root in c.rootPosition().self_and_siblings():
if root == p or root.isAncestorOf(p):
return visible(p)
return False
#@+node:ekr.20080416161551.197: *4* p.level & simpleLevel
def level(self):
'''Return the number of p's parents.'''
p = self
return len(p.stack) if p.v else 0
simpleLevel = level
#@+node:ekr.20111005152227.15566: *4* p.positionAfterDeletedTree
def positionAfterDeletedTree(self):
'''Return the position corresponding to p.nodeAfterTree() after this node is
deleted. This will be p.nodeAfterTree() unless p.next() exists.
This method allows scripts to traverse an outline, deleting nodes during the
traversal. The pattern is::
p = c.rootPosition()
while p:
if <delete p?>:
next = p.positionAfterDeletedTree()
p.doDelete()
p = next
else:
p.moveToThreadNext()
This method also allows scripts to *move* nodes during a traversal, **provided**
that nodes are moved to a "safe" spot so that moving a node does not change the
position of any other nodes.
For example, the move-marked-nodes command first creates a **move node**, called
'Clones of marked nodes'. All moved nodes become children of this move node.
**Inserting** these nodes as children of the "move node" does not change the
positions of other nodes. **Deleting** these nodes *may* change the position of
nodes, but the pattern above handles this complication cleanly.
'''
p = self
next = p.next()
if next:
# The new position will be the same as p, except for p.v.
p = p.copy()
p.v = next.v
return p
else:
return p.nodeAfterTree()
#@+node:shadow.20080825171547.2: *4* p.textOffset
def textOffset(self):
'''
Return the fcol offset of self.
Return None if p has no ancestor @<file> node.
http://tinyurl.com/5nescw
'''
p = self
found, offset = False, 0
for p in p.self_and_parents():
if p.isAnyAtFileNode():
# Ignore parent of @<file> node.
found = True
break
parent = p.parent()
if not parent:
break
# If p is a section definition, search the parent for the reference.
# Otherwise, search the parent for @others.
h = p.h.strip()
i = h.find('<<')
j = h.find('>>')
target = h[i: j + 2] if -1 < i < j else '@others'
for s in parent.b.split('\n'):
if s.find(target) > -1:
offset += g.skip_ws(s, 0)
break
return offset if found else None
#@+node:ekr.20150410101842.1: *3* p.isOutsideAtFileTree
def isOutsideAnyAtFileTree(self):
'''Return True if p is outside any ancestor @<file> node.'''
p = self
for parent in p.self_and_parents():
if parent.isAnyAtFileNode():
return False
return True
#@+node:ekr.20080423062035.1: *3* p.Low level methods
# These methods are only for the use of low-level code
# in leoNodes.py, leoFileCommands.py and leoUndo.py.
#@+node:ekr.20080427062528.4: *4* p._adjustPositionBeforeUnlink
def _adjustPositionBeforeUnlink(self, p2):
'''Adjust position p before unlinking p2.'''
# p will change if p2 is a previous sibling of p or
# p2 is a previous sibling of any ancestor of p.
p = self; sib = p.copy()
# A special case for previous siblings.
# Adjust p._childIndex, not the stack's childIndex.
while sib.hasBack():
sib.moveToBack()
if sib == p2:
p._childIndex -= 1
return
# Adjust p's stack.
stack = []; changed = False; i = 0
while i < len(p.stack):
v, childIndex = p.stack[i]
p3 = Position(v=v, childIndex=childIndex, stack=stack[: i])
while p3:
if p2 == p3:
# 2011/02/25: compare full positions, not just vnodes.
# A match with the to-be-moved node.
stack.append((v, childIndex - 1),)
changed = True
break # terminate only the inner loop.
p3.moveToBack()
else:
stack.append((v, childIndex),)
i += 1
if changed:
p.stack = stack
#@+node:ekr.20080416161551.214: *4* p._linkAfter
def _linkAfter(self, p_after, adjust=True):
'''Link self after p_after.'''
immutable once created. No mechanism is
provided for maintaining reference consistency if data in the alignment
are modified.
An Alignment is expected to be able to generate the following:
- Seqs: Sequence objects in the alignment, can turn themselves into
strings. These are usually thought of as "rows" in an
alignment.
- Positions: Vectors representing data in each position in the alignment
These are usually thought of as "columns" in an alignment.
- SeqData: Vectors representing data in each sequence in the alignment,
not necessarily guaranteed to turn themselves into a string
- Items: Iterator over the characters in the alignment
- Names: List of names of sequences in the alignment. Used for
display order. A cheap way to omit or reorder sequences is
to modify the list of names.
- NamedSeqs: Dict of name -> seq object, used for lookup.
- MolType: MolType of the alignment.
"""
DefaultGap = '-' #default gap character for padding
GapChars = dict.fromkeys('-?') #default gap chars for comparisons
def iterPositions(self, pos_order=None):
"""Iterates over positions in the alignment, in order.
pos_order refers to a list of indices (ints) specifying the column
order. This lets you rearrange positions if you want to (e.g. to pull
out individual codon positions).
Note that self.iterPositions() always returns new objects, by default
lists of elements. Use map(f, self.iterPositions) to apply the
constructor or function f to the resulting lists (f must take a single
list as a parameter). Note that some sequences (e.g. ViennaStructures)
have rules that prevent arbitrary strings of their symbols from being
valid objects.
Will raise IndexError if one of the indices in order exceeds the
sequence length. This will always happen on ragged alignments:
assign to self.SeqLen to set all sequences to the same length.
"""
get = self.NamedSeqs.__getitem__
pos_order = pos_order or xrange(self.SeqLen)
seq_order = self.Names
for pos in pos_order:
yield [get(seq)[pos] for seq in seq_order]
Positions = property(iterPositions)
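# Tiny illustration (hypothetical two-sequence alignment): if the named sequences are
# "AC-" and "AG-", iterating over Positions yields the columns as lists:
#     list(self.Positions)  ->  [['A', 'A'], ['C', 'G'], ['-', '-']]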
def takePositions(self, cols, negate=False, seq_constructor=None):
"""Returns new Alignment containing only specified positions.
By default, the seqs will be lists, but an alternative constructor
can be specified.
Note that takePositions will fail on ragged positions.
"""
if seq_constructor is None:
seq_constructor = self.MolType.Sequence
result = {}
#if we're negating, pick out all the positions except specified indices
if negate:
col_lookup = dict.fromkeys(cols)
for key, row in self.NamedSeqs.items():
result[key] = seq_constructor([row[i] for i in range(len(row)) \
if i not in col_lookup])
#otherwise, just get the requested indices
else:
for key, row in self.NamedSeqs.items():
result[key] = seq_constructor([row[i] for i in cols])
return self.__class__(result, Names=self.Names)
def getPositionIndices(self, f, negate=False):
"""Returns list of column indices for which f(col) is True."""
#negate f if necessary
if negate:
new_f = lambda x: not f(x)
else:
new_f = f
return [i for i, col in enumerate(self.Positions) if new_f(col)]
def takePositionsIf(self, f, negate=False, seq_constructor=None):
"""Returns new Alignment containing cols where f(col) is True.
Note that the seqs in the new Alignment are always new objects. Default
constructor is list(), but an alternative can be passed in.
"""
if seq_constructor is None:
seq_constructor = self.MolType.Sequence
return self.takePositions(self.getPositionIndices(f, negate), \
seq_constructor=seq_constructor)
def IUPACConsensus(self, alphabet=None):
"""Returns string containing IUPAC consensus sequence of the alignment.
"""
if alphabet is None:
alphabet = self.MolType
consensus = []
degen = alphabet.degenerateFromSequence
for col in self.Positions:
consensus.append(degen(coerce_to_string(col)))
return coerce_to_string(consensus)
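# Worked example (illustrative DNA columns): a column containing only 'A' stays 'A',
# while a column containing both 'C' and 'T' degenerates to the IUPAC symbol 'Y'
# (pyrimidine), so sequences "AC" and "AT" would give the consensus "AY".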
def columnFreqs(self, constructor=Freqs):
"""Returns list of Freqs with item counts for each column.
"""
return map(constructor, self.Positions)
def columnProbs(self, constructor=Freqs):
"""Returns FrequencyDistribuutions w/ prob. of each item per column.
Implemented as a list of normalized Freqs objects.
"""
freqs = self.columnFreqs(constructor)
for fd in freqs:
fd.normalize()
return freqs
def majorityConsensus(self, transform=None, constructor=Freqs):
"""Returns list containing most frequent item at each position.
Optional parameter transform gives constructor for type to which result
will be converted (useful when consensus should be same type as
originals).
"""
col_freqs = self.columnFreqs(constructor)
consensus = [freq.Mode for freq in col_freqs]
if transform == str:
return coerce_to_string(consensus)
elif transform:
return transform(consensus)
else:
return consensus
def uncertainties(self, good_items=None):
"""Returns Shannon uncertainty at each position.
Usage: information_list = alignment.information(good_items=None)
If good_items is supplied, deletes any symbols that are not in
good_items.
"""
uncertainties = []
#calculate column probabilities if necessary
if hasattr(self, 'PositionumnProbs'):
probs = self.PositionumnProbs
else:
probs = self.columnProbs()
#calculate uncertainty for each column
for prob in probs:
#if there's a list of valid symbols, need to delete everything else
if good_items:
prob = prob.copy() #do not change original
#get rid of any symbols not in good_items
for symbol in prob.keys():
if symbol not in good_items:
del prob[symbol]
#normalize the probabilities and add to the list
prob.normalize()
uncertainties.append(prob.Uncertainty)
return uncertainties
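# Worked example of the per-column Shannon uncertainty computed above: a fully
# conserved column (all 'A') has probabilities {A: 1.0} and uncertainty
#     H = -1.0 * log2(1.0) = 0 bits,
# while an evenly split column (half 'A', half 'T') has
#     H = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1 bit.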
def scoreMatrix(self):
"""Returns a position specific score matrix for the alignment."""
return Dict2D(dict([(i,Freqs(col)) for i, col in enumerate(self.Positions)]))
def _get_freqs(self, index=None):
"""Gets array of freqs along index 0 (= positions) or 1 (= seqs).
index: if 0, will calculate the frequency of each symbol in each
position (=column) in the alignment. Will return 2D array where the
first index is the position, and the second index is the index of the
symbol in the alphabet. For example, for the TCAG DNA Alphabet,
result[3][0] would store the count of T at position 3 (i.e. the 4th
position in the alignment).
if 1, does the same thing except that the calculation is performed for
each sequence, so the 2D array has the sequence index as the first
index, and the symbol index as the second index. For example, for the
TCAG DNA Alphabet, result[3][0] would store the count of T in the
sequence at index 3 (i.e. the 4th sequence).
First a DenseAlignment object is created, then the calculation is done
on this object. It is important that the DenseAlignment is initialized
with the same MolType and Alphabet as the original Alignment.
"""
da = DenseAlignment(self, MolType=self.MolType, Alphabet=self.Alphabet)
return da._get_freqs(index)
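# Usage sketch for the two index conventions documented above ('aln' is a placeholder
# for an Alignment instance; TCAG DNA Alphabet assumed):
#     pos_counts = aln._get_freqs(0)  # pos_counts[3][0] == count of 'T' at position 3
#     seq_counts = aln._get_freqs(1)  # seq_counts[3][0] == count of 'T' in sequence 3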
def getPosFreqs(self):
"""Returns Profile of counts: position by character.
See documentation for _get_freqs: this just wraps it and converts the
result into a Profile object organized per-position (i.e. per column).
"""
return Profile(self._get_freqs(1), self.Alphabet)
def sample(self, n=None, with_replacement=False, motif_length=1, \
randint=randint, permutation=permutation):
"""Returns random sample of positions from self, e.g. to bootstrap.
Arguments:
- n: the number of positions to sample from the alignment.
Default is alignment length
- with_replacement: boolean flag for determining if sampled
positions are drawn with or without replacement
- random_series: a random number generator with
.randint(min,max) .random() methods
Notes:
By default (resampling all positions without replacement), generates
a permutation of the positions of the alignment.
Setting with_replacement to True and otherwise leaving parameters
as defaults generates a standard bootstrap resampling of the
alignment.
"""
population_size = len(self) // motif_length
if not n:
n = population_size
if with_replacement:
locations = randint(0, population_size, n)
else:
assert n <= population_size, (n, population_size, motif_length)
locations = permutation(population_size)[:n]
positions = [(loc*motif_length, (loc+1)*motif_length)
for loc in locations]
sample = Map(positions, parent_length=len(self))
return self.gappedByMap(sample, Info=self.Info)
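# Usage sketch ('aln' is a placeholder for an Alignment instance), per the notes above:
#     aln.sample()                       # permutation of the alignment's columns
#     aln.sample(with_replacement=True)  # standard bootstrap resample
#     aln.sample(n=50, motif_length=3)   # 50 randomly chosen codon-sized blocks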
def slidingWindows(self, window, step, start=None, end=None):
"""Generator yielding new Alignments of given length and interval.
Arguments:
- window: The length of each returned alignment.
- step: The interval between the start of the successive
alignment objects returned.
- start: first window start position
- end: last window start position
"""
start = [start, 0][start is None]
end = [end, len(self)-window+1][end is None]
end = min(len(self)-window+1, end)
if start < end and len(self)-end >= window-1:
for pos in xrange(start, end, step):
yield self[pos:pos+window]
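# e.g. list(aln.slidingWindows(window=3, step=1)) yields successive length-3
# sub-alignments starting at columns 0, 1, 2, ... ('aln' being an Alignment instance).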
def aln_from_array(a, array_type=None, Alphabet=None):
"""Alignment from array of pos x seq: no change, names are integers.
This is an InputHandler for Alignment. It converts an arbitrary array
of numbers without change, but adds successive integer names (0-based) to
each sequence (i.e. column) in the input a. Data type
# bolt4ds/pipe/api.py
import copy
import os
import json
import ntpath
import shutil
import warnings
import logging
# logging.basicConfig(level=logging.DEBUG)
from abc import ABC
from pathlib import Path
import jwt
import bolt4ds.collect.collect as d6tcollect
from .http_client import client as python_http_client
from .utils.utils import ClientTiny, loadjson, _dict_sort
class ConfigManager(object):
"""
Manage local config. The config is stored as JSON and can be edited directly at the `filecfg` location, by default '~/bolt4dspipe/cfg.json'
Args:
profile (str): name of profile to use
filecfg (str): path to where config file is stored
"""
def __init__(self, profile=None, filecfg='~/bolt4dspipe/cfg.json'):
self.profile = 'default' if profile is None else profile
if str(filecfg).startswith('~'):
filecfg = os.path.expanduser(filecfg)
self.filecfg = filecfg
def init(self, config=None, server='http://localhost', reset=False):
"""
Initialize config with content
Args:
config (dict): manually pass config object
server (str): location of REST API server
reset (bool): force reset of an existing config
"""
if os.path.exists(self.filecfg) and not reset and self.profile in self._loadall():
# todo: why does Path(self.filecfg).exists() not work in pytest?
warnings.warn('Config for profile {} in {} already exists, skipping init. Use reset=True to reset config.'.format(self.profile,self.filecfg))
return None
if not config:
config = {}
if 'server' not in config:
config['server'] = server
if 'filerepo' not in config:
config['filerepo'] = '~/bolt4dspipe'
p = Path(config['filerepo'])
p2 = p/'files/{}/'.format(self.profile)
config['filereporoot'] = str(p)
config['filerepo'] = str(p2)
if 'filedb' not in config:
config['filedb'] = str(p2/'.filedb.json')
# create config file if doesn't exist
if not os.path.exists(self.filecfg):
if not os.path.exists(ntpath.dirname(self.filecfg)):
os.makedirs(ntpath.dirname(self.filecfg))
self._save(config)
def update(self, config):
"""
Update config. Only keys present in the new dict will be updated; other parts of the config are kept as is. In other words, you can pass in a partial dict to update just the parts you need.
Args:
config (dict): updated config
"""
configall = self._loadall()
config_current = configall[self.profile]
config_current.update(config)
self._save(config_current)
return True
def _save(self, config):
if os.path.exists(self.filecfg):
configall = self._loadall()
configall[self.profile] = config
else:
configall = {}
configall[self.profile] = config
with open(self.filecfg, 'w') as f:
json.dump(configall, f, indent=4)
return True
def _loadall(self):
if not os.path.exists(self.filecfg):
self.init()
print('auto created profile "{}", see docs how to customize profile'.format(self.profile))
with open(self.filecfg, 'r') as f:
config = json.load(f)
return config
def load(self):
"""
Loads config
Returns:
dict: config
"""
config = self._loadall()
if self.profile not in config:
self.init()
config = self._loadall()
warnings.warn('auto created profile "{}", see docs how to customize profile'.format(self.profile))
config = config[self.profile]
for ikey in ['filereporoot','filerepo','filedb']:
if config[ikey].startswith('~'): # do this dynamically
config[ikey] = os.path.expanduser(config[ikey])
if not os.path.exists(config['filerepo']):
os.makedirs(config['filerepo'])
return config
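# Usage sketch (hypothetical values; the keys mirror those written by init/update above):
#     cfg = ConfigManager(profile='default')
#     cfg.init(server='http://localhost')              # creates ~/bolt4dspipe/cfg.json if needed
#     cfg.update({'filerepo': '~/bolt4dspipe/files'})  # partial update of a single key
#     settings = cfg.load()                            # paths expanded, directories created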
class _APIBase(metaclass=d6tcollect.Collect):
def __init__(self, config=None, profile=None, filecfg='~/bolt4dspipe/cfg.json'):
self.profile = 'default' if profile is None else profile
if config is None:
self.configmgr = ConfigManager(filecfg=filecfg, profile=self.profile)
self.config = self.configmgr.load()
else:
self.config = config
warnings.warn("Using manual config override, some api functions might not work")
self.cfg_profile = self.config
self.cfg_filecfg = filecfg
self.filerepo = self.cfg_profile['filerepo']
self.dir = self.filerepo
self.dirpath = Path(self.dir)
self.key = self.cfg_profile.get('key',None)
if self.key is None:
# print("Auto generated an encryption key, update the config if you want to use your own")
import uuid
self.key = str(uuid.uuid4())
self.configmgr.update({'key':self.key})
self.encrypt_keys = ['location','readCredentials','writeCredentials','settings','files','readParams']
def list_pipes(self, names_only=True, parent_only=False):
"""
List all pipes you have access to
Args:
names_only (bool): if false, return all details
"""
r = self.cnxn.pipes.get()[1]
if parent_only:
r = [o for o in r if not o.get('parent')]
if names_only:
r = sorted([o['name'] for o in r])
return r
def wipe_all(self, confirm=True):
"""
Remove all d6tpipe files. WARNING: this can't be undone
Args:
confirm (bool): ask for user confirmation
"""
if confirm:
c = input('Confirm deleting files in {}. WARNING: this cannot be undone (y/n)'.format(self.dir))
else:
c = 'y'
if c=='y':
del self.cnxn
shutil.rmtree(self.filerepo)
os.remove(self.configmgr.filecfg)
def list_local_pipes(self):
"""
List all pipes already pulled
Returns:
list: list of pipe names
"""
dirs = [ name for name in os.listdir(self.filerepo) if os.path.isdir(os.path.join(self.filerepo, name)) ]
return [name for name in dirs if os.listdir(os.path.join(self.filerepo, name))]
def move_repo(self, path):
"""
Moves all files to another location and updates the config
Args:
path (pathlib.Path):
Returns:
bool:
"""
Path(path).mkdir(parents=True, exist_ok=True)
shutil.move(self.filerepo,path)
self.configmgr.update({'filerepo': path})
print('Moved repo to {}. Reloading api'.format(path))
self.__init__(profile=self.profile, filecfg=self.cfg_filecfg)
return True
def encode(self, dict_):
"""
Encrypt
Args:
dict_ (dict):
Returns:
dict: all values encrypted, keys are kept as is
"""
raise NotImplementedError('Sign up for premium features to access this function, email <EMAIL>')
def decode(self, dict_):
"""
Decrypt
Args:
dict_ (dict):
Returns:
dict:
"""
raise NotImplementedError('Sign up for premium features to access this function, email <EMAIL>')
class APILocal(_APIBase,metaclass=d6tcollect.Collect):
"""
As an alternative to the remote API, you can store everything locally. It mirrors the basic functionality of the remote API but is not as feature-rich.
Args:
config (dict): manually pass config object
profile (str): name of profile to use
filecfg (str): path to where config file is stored
"""
def __init__(self, config=None, profile=None, filecfg='~/bolt4dspipe/cfg.json'):
super().__init__(config,profile,filecfg)
self.cnxn = ClientTiny(self.config['filedb'])
self.mode = 'local'
class APIClient(_APIBase, metaclass=d6tcollect.Collect):
"""
Manager to interface with the remote API.
Args:
token (str): your API token
config (dict): manually pass config object
profile (str): name of profile to use
filecfg (str): path to where config file is stored
"""
def __init__(self, token='config', config=None, profile=None, filecfg='~/bolt4dspipe/cfg.json'):
super().__init__(config,profile,filecfg)
if token=='config':
self.token = self.cfg_profile.get('token',None)
else:
self.token = token
if self.token is not None:
request_headers = {
"Authorization": 'Token {0}'.format(self.token)
}
else: # if not registered
request_headers = {}
client = python_http_client.Client(host=self.cfg_profile['server'],
request_headers=request_headers,
append_slash=True,
version='1')
self.cnxn = client.api
self.mode = self.cfg_profile.get('mode','local')
if self.token is not None:
# test connection
try:
r,d = self.cnxn.get()
if 'databolt.tech' in self.cfg_profile['server'] and 'username' not in d:
warnings.warn('API authentication error')
else:
self.username = d.get('username')
print('Connected to {} as {}'.format(self.cfg_profile['server'],self.username))
if 'message' in d:
print(d['message'])
except Exception as e:
warnings.warn('API connection error ' + str(e))
else:
print('No token provided. Register or login to connect to repo API.')
def register(self, username, email, password):
"""
Register a new API user
Args:
username (str):
email (str):
password (str):
"""
data = {'username': username, 'email': email, 'password': password}
# response, data = self.cnxn.accounts.post(request_body=data)
try:
response, data = self.cnxn.accounts.post(request_body=data)
except Exception as e:
if 'This field must be unique' in str(e):
warnings.warn("Username or email already registered, registration failed. Pick a different username if you haven't registered before. If you already registered and forgot your token, call api.forgotToken(). If you want to re-register provide a different username")
return
else:
raise e
token = data.get('token')
self._printtoken(token)
return token
def login(self, username, password):
"""
Login if already registered
Args:
username (str):
password (str):
"""
return self.forgotToken(username, password)
def forgotToken(self, username, password):
"""
Retrieve your API token
Args:
username (str):
password (str):
"""
data = {'username': username, 'password': password}
response, data = self.cnxn.accounts.token.post(request_body=data)
token = data.get('token')
self._printtoken(token)
return token
def setToken(self, token):
self._printtoken(token)
def _printtoken(self, token):
print('Your token is below. Please save it and KEEP IT SAFE! If you forget it, you can retrieve it with APIClient().forgotToken(username, password)')
print(token)
print('reloading api to update token')
self.configmgr.update({'token': token})
self.__init__(profile=self.profile, filecfg=self.cfg_filecfg)
def _unregister(self, username):
self.cnxn.accounts._(username).delete()
self.configmgr.update({'token': None})
@d6tcollect.collect
def list_profiles(filecfg='~/bolt4dspipe/cfg.json'):
if str(filecfg).startswith('~'):
filecfg = os.path.expanduser(filecfg)
print(open(filecfg).read())
@d6tcollect.collect
def upsert_resource(apiroot, settings):
"""
Convenience function to create or update a resource
Args:
apiroot (obj): API endpoint root eg `api.cnxn.pipes`
settings (dict): resource settings
Returns:
response (obj): server response
data (dict): json returned by server
"""
try:
r, d = apiroot._(settings['name']).patch(request_body=settings)
except Exception as e:
if 'Not found' in str(e):
return apiroot.post(request_body=settings)
else:
raise e
return apiroot._(settings['name']).get()
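# Usage sketch (hypothetical settings; only 'name' is required by the logic above,
# any other keys are passed through to the server unchanged):
#     response, data = upsert_resource(api.cnxn.pipes, {'name': 'my-pipe'})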
@d6tcollect.collect
def upsert_pipe(api, settings):
"""
Convenience function to create or update a pipe
Args:
api (obj): api
settings (dict): pipe settings
Returns:
response (obj): server response
data (dict): json returned by server
"""
return upsert_resource(api.cnxn.pipes, settings)
@d6tcollect.collect
def upsert_permissions(api, pipe_name, settings):
"""
Convenience function to create or update pipe permissions
Args:
api (obj): api
settings (dict): permission settings
Returns:
response (obj): server response
data (dict): json returned by server
"""
apiroot = api.cnxn.pipes._(pipe_name).permissions
# for now just post to permissions
return apiroot.post(request_body=settings)
def upsert_pipe_json(api, path_json, name):
"""
Convenience function to create or update a resource. Loads settings from config file to secure credentials
Args:
api (obj): api
path_json (str): path to config file in json format
name (str): name of json entry
Returns:
| |
bin_ix: numpy.ndarray
:param events: Set of photon events to check if they are near the detector
edge.
:type events: dict
:param verbose: Verbosity level, a value of 0 is minimum verbosity.
:type verbose: int
:param valid_detrad: The radius, in degrees, beyond which an edge warning is
raised.
:type valid_detrad: float
:returns: bool -- Returns True/False whether a given set of events are too
close to the edge of the detector.
"""
ix = np.where(mc.distance(events['photons']['col'][bin_ix],
events['photons']['row'][bin_ix], 400, 400)*
gxt.aper2deg(4) >= valid_detrad)
return True if len(ix[0]) else False
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def getflags(band, bin_ix, events, verbose=0):
"""
Pass flags if data meets conditions that are likely to create
misleading photometry. The flags are binary, with bins set as follows:
1 - 'hotspot' - aperture events in pixels contiguous to a masked hotspot
2 - 'mask edge' - aperture events in pixels contiguous to the detector
edge
4 - 'exptime' - bin contains < 50% exposure time coverage
8 - 'response' - events weighted with response < 0.7
16 - 'nonlinearity' - local countrate exceeds 10% response dropoff
32 - 'detector edge' - events outside of 0.5 degrees of detector center
64 - 'bg hotspot' - annulus events in pixels contiguous to a masked
hotspot
128 - 'bg mask' - annulus events in pixels contiguous to detector edge
:param band: The band being used, either 'FUV' or 'NUV'.
:type band: str
:param bin_ix: Array indices designating which events are in the time bin
of interest.
:type bin_ix: numpy.ndarray
:param events: Set of photon events to check for warning flags.
:type events: dict
:param verbose: Verbosity level, a value of 0 is minimum verbosity.
:type verbose: int
:returns: numpy.ndarray -- The array of flags for each photon event.
"""
bin_num = np.unique(bin_ix)
flags = np.zeros(len(bin_num))
for i, b in enumerate(bin_num):
ix = np.where(bin_ix == b)
if len(ix):
#ix = bin_ix[np.where(bin_ix == bin)]
try:
if maskwarning(band, ix, events, mapkey='H',
mode='aper', verbose=verbose):
flags[i] += 1
if maskwarning(band, ix, events, mapkey='E',
mode='aper', verbose=verbose):
flags[i] += 2
if exptimewarning(i, events, verbose=verbose):
flags[i] += 4
if lowresponsewarning(ix, events, verbose=verbose):
flags[i] += 8
if nonlinearitywarning(band, i, events, verbose=verbose):
flags[i] += 16
if detedgewarning(ix, events, verbose=verbose):
flags[i] += 32
if maskwarning(band, ix, events, mapkey='H',
mode='bg', verbose=verbose):
flags[i] += 64
if maskwarning(band, ix, events, mapkey='E',
mode='bg', verbose=verbose):
flags[i] += 128
#if caiwarning(band, ix, events, verbose=verbose):
# flags[i] += 256
if recoverywarning(band, ix, events, verbose=verbose):
flags[i] += 512
except:
raise
else:
flags[i] = np.nan
return np.array(flags)
# ------------------------------------------------------------------------------
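# Convenience sketch for interpreting the bitwise flags returned by getflags() above.
# This helper is illustrative only (not part of the module's established API); it
# assumes numpy is already imported as np, as elsewhere in this module.
def decode_flags(flag):
    """Return the list of warning names encoded in a single getflags() value."""
    if np.isnan(flag):
        return []
    names = {1: 'hotspot', 2: 'mask edge', 4: 'exptime', 8: 'response',
             16: 'nonlinearity', 32: 'detector edge', 64: 'bg hotspot',
             128: 'bg mask', 512: 'recovery'}
    return [name for bit, name in names.items() if int(flag) & bit]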
# ------------------------------------------------------------------------------
def quickmag(band, ra0, dec0, tranges, radius, annulus=None, stepsz=None,
verbose=0, detsize=1.25, coadd=False):
"""
Primary wrapper function for generating and synthesizing all of the
parameters and calculations necessary to create light curves.
:param band: The band being used, either 'FUV' or 'NUV'.
:type band: str
:param ra0: Right ascension, in degrees, of the target position.
:type ra0: float
:param dec0: Declination, in degrees, of the target position.
:type dec0: float
:param tranges: Set of time ranges to query within in GALEX time seconds.
:type tranges: list
:param radius: The radius of the photometric aperture, in degrees.
:type radius: float
:param annulus: Radii of the inner and outer extents of the background
annulus, in degrees.
:type annulus: list
:param stepsz: The size of the time bins to use, in seconds.
:type stepsz: float
:param verbose: Verbosity level, a value of 0 is minimum verbosity.
:type verbose: int
:param detsize: Effective diameter, in degrees, of the field-of-view.
:type detsize: float
:param coadd: Set to True if calculating a total flux instead of flux
from each time bin.
:type coadd: bool
:returns: dict -- The light curve, including input parameters.
"""
if verbose:
mc.print_inline("Retrieving all of the target events.")
searchradius = radius if annulus is None else annulus[1]
data = pullphotons(band, ra0, dec0, tranges, searchradius, verbose=verbose,
detsize=detsize)
if not data:
return None
if verbose:
mc.print_inline("Binning data according to requested depth.")
# Multiple ways of defining bins
try:
trange = [np.array(tranges).min(), np.array(tranges).max()]
except ValueError:
trange = tranges
if coadd:
bins = np.array(trange)
elif stepsz:
bins = np.append(np.arange(trange[0], trange[1], stepsz), max(trange))
else:
bins = np.unique(np.array(tranges).flatten())
lcurve = {'params':gphot_params(band, [ra0, dec0], radius, annulus=annulus,
verbose=verbose, detsize=detsize,
stepsz=stepsz, trange=trange)}
# This is equivalent in function to np.digitize(data['t'],bins) except
# that it's much, much faster. See numpy issue #2656 at
# https://github.com/numpy/numpy/issues/2656
bin_ix = np.searchsorted(bins, data['t'], "right")
try:
lcurve['t0'] = bins[np.unique(bin_ix)-1]
lcurve['t1'] = bins[np.unique(bin_ix)]
lcurve['exptime'] = np.array(
dbt.compute_exptime(band,
tranges if coadd else list(zip(
lcurve['t0'], lcurve['t1'])),
verbose=verbose, coadd=coadd, detsize=detsize,
skypos=[ra0, dec0]))
except IndexError:
if np.isnan(data['t']):
if verbose:
mc.print_inline(
"No valid data available in {t}".format(t=tranges))
lcurve['t0'] = np.array([np.nan])
lcurve['t1'] = np.array([np.nan])
lcurve['exptime'] = np.array([0])
angSep = mc.angularSeparation(ra0, dec0, data['ra'], data['dec'])
aper_ix = np.where(angSep <= radius)
lcurve['t0_data'] = reduce_lcurve(bin_ix, aper_ix, data['t'], np.min)
lcurve['t1_data'] = reduce_lcurve(bin_ix, aper_ix, data['t'], np.max)
lcurve['t_mean'] = reduce_lcurve(bin_ix, aper_ix, data['t'], np.mean)
lcurve['q_mean'] = reduce_lcurve(bin_ix, aper_ix, data['q'], np.mean)
lcurve['counts'] = reduce_lcurve(bin_ix, aper_ix, data['t'], len)
lcurve['flat_counts'] = reduce_lcurve(bin_ix, aper_ix,
1./data['response'], np.sum)
lcurve['responses'] = reduce_lcurve(bin_ix, aper_ix, data['response'],
np.mean)
lcurve['detxs'] = reduce_lcurve(bin_ix, aper_ix, data['col'], np.mean)
lcurve['detys'] = reduce_lcurve(bin_ix, aper_ix, data['row'], np.mean)
lcurve['detrad'] = mc.distance(lcurve['detxs'], lcurve['detys'], 400, 400)
lcurve['racent'] = reduce_lcurve(bin_ix, aper_ix, data['ra'], np.mean)
lcurve['deccent'] = reduce_lcurve(bin_ix, aper_ix, data['dec'], np.mean)
skybgmcatdata = dbt.get_mcat_data([ra0, dec0], radius)
lcurve['mcat_bg'] = lcurve['exptime']*np.array(
[dbt.mcat_skybg(band, [ra0, dec0], radius, trange=tr,
mcat=skybgmcatdata, verbose=verbose)
for tr in zip(lcurve['t0'], lcurve['t1'])])
if annulus is not None:
annu_ix = np.where((angSep > annulus[0]) & (angSep <= annulus[1]))
lcurve['bg_counts'] = reduce_lcurve(bin_ix, annu_ix, data['t'], len)
lcurve['bg_flat_counts'] = reduce_lcurve(
bin_ix, annu_ix, data['response'], np.sum)
lcurve['bg'] = (mc.area(radius)*lcurve['bg_flat_counts'] /
(mc.area(annulus[1])-mc.area(annulus[0])))
else:
lcurve['bg_counts'] = np.zeros(len(lcurve['counts']))
lcurve['bg_flat_counts'] = np.zeros(len(lcurve['counts']))
lcurve['bg'] = np.zeros(len(lcurve['counts']))
lcurve['cps'] = lcurve['flat_counts']/lcurve['exptime']
lcurve['cps_err'] = aperture_error(lcurve['flat_counts'], lcurve['exptime'])
lcurve['cps_bgsub'] = (lcurve['flat_counts']-
lcurve['bg'])/lcurve['exptime']
lcurve['cps_bgsub_err'] = aperture_error(
lcurve['flat_counts'], lcurve['exptime'], bgcounts=lcurve['bg'])
lcurve['cps_mcatbgsub'] = (lcurve['flat_counts']-
lcurve['mcat_bg'])/lcurve['exptime']
lcurve['cps_mcatbgsub_err'] = aperture_error(
lcurve['flat_counts'], lcurve['exptime'], bgcounts=lcurve['mcat_bg'])
lcurve['flux'] = gxt.counts2flux(lcurve['cps'], band)
lcurve['flux_err'] = gxt.counts2flux(lcurve['cps_err'], band)
lcurve['flux_bgsub'] = gxt.counts2flux(lcurve['cps_bgsub'], band)
lcurve['flux_bgsub_err'] = gxt.counts2flux(lcurve['cps_bgsub_err'], band)
lcurve['flux_mcatbgsub'] = gxt.counts2flux(lcurve['cps_mcatbgsub'], band)
lcurve['flux_mcatbgsub_err'] = gxt.counts2flux(
lcurve['cps_mcatbgsub_err'], band)
# NOTE: These conversions to mag can throw logarithm warnings if the
# background is brighter than the source, resuling in a negative cps which
# then gets propagated as a magnitude of NaN.
lcurve['mag'] = gxt.counts2mag(lcurve['cps'], band)
lcurve['mag_err_1'] = (lcurve['mag'] -
gxt.counts2mag(lcurve['cps'] + lcurve['cps_err'],
band))
lcurve['mag_err_2'] = (gxt.counts2mag(lcurve['cps'] -
lcurve['cps_err'], band) -
lcurve['mag'])
lcurve['mag_bgsub'] = gxt.counts2mag(lcurve['cps_bgsub'], band)
lcurve['mag_bgsub_err_1'] = (lcurve['mag_bgsub'] -
gxt.counts2mag(lcurve['cps_bgsub'] +
lcurve['cps_bgsub_err'], band))
lcurve['mag_bgsub_err_2'] = (gxt.counts2mag(lcurve['cps_bgsub'] -
lcurve['cps_bgsub_err'], band) -
lcurve['mag_bgsub'])
lcurve['mag_mcatbgsub'] = gxt.counts2mag(lcurve['cps_mcatbgsub'], band)
lcurve['mag_mcatbgsub_err_1'] = (lcurve['mag_mcatbgsub'] -
gxt.counts2mag(lcurve['cps_mcatbgsub'] +
lcurve['cps_mcatbgsub_err'],
band))
lcurve['mag_mcatbgsub_err_2'] = (gxt.counts2mag(lcurve['cps_mcatbgsub'] -
lcurve['cps_mcatbgsub_err'],
band) -
lcurve['mag_mcatbgsub'])
lcurve['photons'] = data
lcurve['flags'] = getflags(band, bin_ix, lcurve, verbose=verbose)
return lcurve
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
def get_curve(band, ra0, dec0, radius, annulus=None, stepsz=None,
trange=None, tranges=None, verbose=0, coadd=False, minexp=1.,
maxgap=1., detsize=1.1):
"""
Wraps quickmag() to ensure proper parameter formatting and
therefore make it slightly more user friendly.
:param band: The band being used, either 'FUV' or 'NUV'.
:type band: str
:param ra0: Right ascension, in degrees, of the target position.
:type ra0: float
:param dec0: Declination, in degrees, of the target position.
:type dec0: float
:param radius: The radius of the photometric aperture, in degrees.
:type radius: float
:param annulus: Radii of the inner and outer extents of the background
annulus, in degrees.
:type annulus: list
:param stepsz: The size (depth) of the time bins to use, in seconds.
:type stepsz: float
:param trange: Minimum and maximum time range to make a light curve,
in GALEX time seconds.
:type trange: list
:param tranges: Set of time ranges to query within in GALEX time seconds.
:type tranges: list
:param verbose: Verbosity level, a value of 0 is minimum verbosity.
:type verbose: int
:param coadd: Set to True if calculating a total flux instead of flux
from each time bin.
:type coadd: bool
:param minexp: Minimum effective exposure time, in seconds, for a time
range to be included.
:type minexp: float
:param maxgap: Maximum gap size, in seconds, for data to be considered
contiguous.
:type maxgap: float
:param detsize: Effective diameter, in degrees, of the field-of-view.
:type detsize: float
:returns: dict -- The light curve, including input parameters.
from __future__ import absolute_import, division, print_function
import tensorflow.compat.v1 as tf
import numpy as np
import time
from scipy.special import comb, perm
import argparse
import json
import os
import pprint
### self-defined module
import policy
from dataloader import *
import util
import tff_util
from task import Task
parser = argparse.ArgumentParser(description='FL')
parser.add_argument("--distribution", type=str, default="mix", help="Data distribution")
parser.add_argument("--lr", type=float, default=0.1, help="Initialized learning rate")
parser.add_argument("--policy", type=str, default="my", help="Client Assignment Policy")
parser.add_argument("--trade_once", action="store_true", help="Set to update clients selection only after the first epoch")
args = parser.parse_args()
# tf.compat.v1.enable_v2_behavior()
# tf.compat.v1.enable_eager_execution()
# NUM_EXAMPLES_PER_USER = 1000
### Experiment Configs
MIX_RATIO = 0.8
SIMULATE = False
EPOCH_NUM = 35
TRIAL_NUM = 1
TASK_NUM = 2
bid_per_loss_delta_space = [0.8,1,1.2]
required_client_num_space = [1,2,3]
target_labels_space = [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]]
START_TIME = time.time()
# class Client:
# def __init__(self, ...):
# self.cost = xxx
# self.id = xxx
# self.idlecost = xxx
# self.data = xxx
def pick_client_based_on_index(task, epoch, selected_client_idx, all_client_data, all_client_data_full):
clients_data = []
# if epoch == 0:
# return all_client_data_full
# else:
for idx in selected_client_idx:
batch= next(all_client_data[idx])
if task.target_labels is None:
new_batch = batch
else:
# filter batch according to required labels of tasks
new_batch = {"x": [], "y": []}
for idx, y in enumerate(batch["y"]):
if y in task.target_labels:
new_batch["x"].append(batch["x"][idx])
new_batch["y"].append(batch["y"][idx])
clients_data.append([new_batch])
return clients_data
def train_one_round(
task,
round_idx,
learning_rate,
epoch,
all_client_data,
all_client_data_full,
test_data,
ckpt=False, evaluate_each_client=False):
if task.selected_client_idx is None:
assert args.trade_once
task.totoal_loss_delta = 0
return
clients_data = pick_client_based_on_index(task, epoch, task.selected_client_idx, all_client_data, all_client_data_full)
task.params_per_client = [None] * len(task.selected_client_idx)
### Train
if SIMULATE:
local_models = [(task.model['weights'], task.model['bias']) for _ in task.selected_client_idx]
else:
local_models = tff_util.federated_train(task.model, learning_rate, clients_data)
### Output model parameters of all selected agents of this task
### Store the model parameters of this round if ckpt is True
for local_index in range(len(local_models)):
client_id = task.selected_client_idx[local_index]
if epoch == 0:
if task.params_per_client[local_index] is None:
task.params_per_client[local_index] = []
task.params_per_client[local_index].append(
(local_models[local_index], learning_rate))
task.params_per_client[local_index] = (local_models[local_index], learning_rate)
if ckpt:
f = open(os.path.join(os.path.dirname(__file__), "weights_"+str(client_id)+".txt"),"a",encoding="utf-8")
for i in local_models[local_index][0]:
line = ""
arr = list(i)
for j in arr:
line += (str(j)+"\t")
print(line, file=f)
print("***"+str(learning_rate)+"***",file=f)
print("-"*50,file=f)
f.close()
f = open(os.path.join(os.path.dirname(__file__), "bias_" + str(client_id) + ".txt"), "a", encoding="utf-8")
line = ""
for i in local_models[local_index][1]:
line += (str(i) + "\t")
print(line, file=f)
print("***" + str(learning_rate) + "***",file=f)
print("-"*50,file=f)
f.close()
### Store the model before updating
if task.model_epoch_start is None:
task.model_epoch_start = task.model
### Update the global model parameters
task.model = tff_util.weighted_average_model(local_models, data_num,
task.selected_client_idx, evaluate_each_client, test_data=None)
task.learning_rate = learning_rate
### evaluate the loss
loss = tff_util.evaluate_loss_on_server(task.model, test_data)
task.totoal_loss_delta = 1000*float(task.prev_loss - loss)
task.prev_loss = loss
task.log("Epoch {} Round {} at {:.3f} s, selected_client_idx: {}, learning rate: {:.3f}, loss: {:.3f}, loss_delta: {:.3f}".format(
epoch, round_idx, time.time()-START_TIME, task.selected_client_idx, learning_rate, loss, task.totoal_loss_delta))
#raise NotImplementedError("TODO, update task.total_loss_delta")
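# Note on the aggregation step above: tff_util.weighted_average_model presumably
# performs a FedAvg-style weighted average of the local models, i.e. for clients k
# holding n_k examples (taken from data_num),
#     w_global = sum_k (n_k / sum_j n_j) * w_k
# and likewise for the bias terms; the exact weighting lives in tff_util and is not
# shown here.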
def calculate_feedback(task, test_data):
### TODO: comment the process to calculate the shapley value
### Substitute with our own algorithm
if task.selected_client_idx is None:
return []
### Calculate the Feedback
selected_client_num = len(task.selected_client_idx)
all_sets = util.PowerSetsBinary([i for i in range(selected_client_num)])
group_shapley_value = []
# print(train_with_gradient_and_valuation(task, [0], test_data, data_num))
# print(train_with_gradient_and_valuation(task, [9], test_data, data_num))
# print(train_with_gradient_and_valuation(task, [0, 9], test_data, data_num))
# raise
for s in all_sets:
_loss = tff_util.train_with_gradient_and_valuation(task, s, test_data, data_num)
contrib = task.prev_loss - _loss
group_shapley_value.append(contrib)
# task.log(str(s)+"\t"+str(group_shapley_value[len(group_shapley_value)-1]))
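# Shapley-style attribution: for each selected client i, sum the marginal contribution
# v(S + {i}) - v(S) over all subsets S of the other selected clients, weighting each
# term by 1 / C(n-1, |S|). This appears to omit the usual extra 1/n factor, so the
# result is a scaled variant of the textbook Shapley value.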
agent_shapley = []
for index in range(selected_client_num):
shapley = 0.0
for set_idx, j in enumerate(all_sets):
if index in j:
remove_list_index = util.remove_list_indexed(index, j, all_sets)
if remove_list_index != -1:
shapley += (group_shapley_value[set_idx] - group_shapley_value[
remove_list_index]) / (comb(selected_client_num - 1, len(all_sets[remove_list_index])))
agent_shapley.append(shapley)
# for ag_s in agent_shapley:
# print(ag_s)
# task.select_clients(agent_shapley, free_client)
# if sum(agent_shapley) == 0:
# import code
# code.interact(local=locals())
return agent_shapley
def run_one_trial():
### Load data
all_client_data, test_data, all_client_data_full = main_load(args)
############################### client ###########################################
### 0 denotes free, 1 denotes being occupied
free_client = [0] * NUM_AGENT
cost_list = []
for client_idx in range(NUM_AGENT):
# cost_list.append(random.randint(1,10)/10)
cost_list.append(0)
idlecost_list = []
for client_idx in range(NUM_AGENT):
idlecost_list.append(0)
client_feature_list = list(zip(cost_list, idlecost_list))
# client_list = []
# for client_idx in range(NUM_AGENT):
# client = Client(....)
# client_list.append(client)
############################### client end ###########################################
### Read initial model parameters from files
w_initial, b_initial = tff_util.init_model()
learning_rate = args.lr
############################### Task ###########################################
### Initialize the global model parameters for both tasks
### At the first epoch, both tasks select all clients
task_list = []
def create_task(selected_client_idx, init_model, required_client_num, bid_per_loss_delta, target_labels=None):
task = Task(
task_id = len(task_list),
selected_client_idx=selected_client_idx,
model = init_model,
required_client_num=required_client_num,
bid_per_loss_delta=bid_per_loss_delta,
target_labels=target_labels)
task_list.append(task)
### Init the loss
task.prev_loss = tff_util.evaluate_loss_on_server(task.model, test_data)
for task_id in range(TASK_NUM):
create_task(
selected_client_idx=list(range(NUM_AGENT)),
init_model = {
'weights': w_initial,
'bias': b_initial
},
required_client_num=util.sample_config(required_client_num_space, task_id, use_random=True),
bid_per_loss_delta=util.sample_config(bid_per_loss_delta_space, task_id, use_random=True),
target_labels=util.sample_config(target_labels_space,task_id, use_random=True)
)
### Initialize the price_table and bid table
price_table = None
def init_price_table(price_table):
price_table = []
for client_idx in range(NUM_AGENT):
init_price_list = []
for task_idx in range(len(task_list)):
init_price_list.append(0)
price_table.append(init_price_list)
return price_table
price_table = init_price_table(price_table)
bid_table = np.zeros((NUM_AGENT, len(task_list)))
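# Both tables are indexed as [client_idx][task_idx]: price_table holds each client's
# (scaled) Shapley value for a task, and bid_table holds that value weighted by the
# task's total bid.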
############################### Main process of FL ##########################################
total_reward_list = []
succ_cnt_list = []
reward_sum=[]
for epoch in range(EPOCH_NUM):
for task in task_list:
task.epoch = epoch
for round_idx in range(1):
### Train the model parameters distributedly
# return a list of model parameters
# local_models[0][0], weights of the 0-th agent
# local_models[0][1], bias of the 0-th agent
for task in task_list:
train_one_round(
task,
round_idx,
learning_rate,
epoch,
all_client_data,
all_client_data_full,
test_data,
ckpt=False,
evaluate_each_client=False)
learning_rate = learning_rate * 0.7
### At the end of this epoch:
### calculate the feedback for each task and, when allowed, update the client assignment
print("Start to update client assignment ... ")
shapely_value_table = [calculate_feedback(task, test_data) for task in task_list]
### Normalize using sigmoid
shapely_value_table = [
util.sigmoid(np.array(elem)) if len(elem) > 0 else elem
for elem in shapely_value_table]
shapely_value_table = np.array(shapely_value_table)
print(shapely_value_table)
### Update price table
for task_idx in range(len(task_list)):
if task_list[task_idx].selected_client_idx is None:
continue
selected_client_index = task_list[task_idx].selected_client_idx
for idx in range(len(selected_client_index)):
client_idx = selected_client_index[idx]
shapley_value = shapely_value_table[task_idx][idx]
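# Scale the per-client Shapley value by the fraction of all agents that this task
# selected before writing it into the price table.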
shapely_value_scaled = shapley_value * len(selected_client_index) / NUM_AGENT
# price_table[client_idx][task_idx] = ((epoch / (epoch + 1)) * price_table[client_idx][task_idx] + (1 / (epoch + 1)) * shapely_value_scaled)
price_table[client_idx][task_idx] = shapely_value_scaled
total_cost = 0
bid_list = [task.totoal_loss_delta * task.bid_per_loss_delta for task in task_list]
total_bid = sum(bid_list)
for task in task_list:
if task.selected_client_idx is None:
continue
for client_idx in task.selected_client_idx:
total_cost += cost_list[client_idx]
assert price_table is not None
### Update bid table
for task_idx in range(len(task_list)):
if task_list[task_idx].selected_client_idx is None:
continue
selected_client_index = task_list[task_idx].selected_client_idx
for idx in range(len(selected_client_index)):
client_idx = selected_client_index[idx]
shapley_value = shapely_value_table[task_idx][idx]
bid_table[client_idx][task_idx] = shapley_value * bid_list[task_idx]
# reward_list = [task.totoal_loss_delta * task.bid_per_loss_delta for task in task_list]
# reward_list = [task.totoal_loss_delta * task.bid_per_loss_delta - total_cost for task in task_list]
#print ('reward list', reward_list)
print("Start to select clients ... ")
if epoch == 0 or not args.trade_once:
ask_table = util.calcualte_client_value(price_table, client_feature_list)
norm_ask_table = util.normalize_data(ask_table)
norm_bid_table = util.normalize_data(bid_table)
if args.policy == "my":
succ_cnt, reward = policy.my_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table)
elif args.policy == "random":
succ_cnt, reward = policy.random_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table)
elif args.policy == "simple":
succ_cnt, reward = policy.simple_select_clients(task_list, NUM_AGENT)
elif args.policy == "mcafee":
if epoch == 0:
succ_cnt, reward = policy.mcafee_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table)
norm_bid_table_first_epoch = norm_bid_table.copy()
elif args.policy == "even":
succ_cnt, reward = policy.even_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table)
else:
raise NotImplementedError("Unknown policy: {}".format(args.policy))
print("Client assignment Done ")
for task in task_list:
task.end_of_epoch()
### calculate reward
if epoch > 0:
if args.policy == "mcafee":
_, reward = policy.mcafee_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table_first_epoch,
update=False)
bid_list = [task.totoal_loss_delta * reward for task in task_list]
print ([task.totoal_loss_delta for task in task_list])
total_bid = sum(bid_list)
total_reward = total_bid - total_cost
total_reward_list.append(total_reward)
reward_sum.append(sum(total_reward_list))
elif args.policy == "even":
_, reward = policy.even_select_clients(
norm_ask_table,
client_feature_list,
task_list,
norm_bid_table,
update=False
)
total_reward = reward - total_cost
total_reward_list.append(total_reward)
reward_sum.append(sum(total_reward_list))
# raise NotImplementedError("Current implementation is wrong")
# client_value_table = policy.calcualte_client_value(price_table, client_feature_list)
# task_price_list = np.sum(bid_table, axis=0)
# sorted_task_with_index = sorted(enumerate(task_price_list), key=lambda x: x[1], reverse=True)
# client_value_list = np.sum((client_value_table), axis=1)
# client_value_list_sorted = sorted(enumerate(client_value_list), key=lambda x: x[1], reverse=False)
# for j in selected_client_index:
# b = client_value_list_sorted[j][1]
# bid_list = [task.totoal_loss_delta * 1/2* (task.bid_per_loss_delta +b ) for task in task_list]
else:
total_reward = total_bid - total_cost
total_reward_list.append(total_reward)
reward_sum.append(sum(total_reward_list))
print(reward_sum[-1])
### Count successful matching
succ_cnt_list.append(succ_cnt)
### end of training
with
# Copyright 2022 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tools related to working with human keypoints."""
import abc
import dataclasses
from typing import Collection, Mapping, Optional, Tuple
import immutabledict
from matplotlib import collections as mc
from matplotlib import pyplot as plt
import numpy as np
import plotly.graph_objects as go
from waymo_open_dataset import label_pb2
from waymo_open_dataset.protos import keypoint_pb2
from waymo_open_dataset.utils import keypoint_data as _data
KeypointType = keypoint_pb2.KeypointType
ColorByType = Mapping['KeypointType', str]
DEFAULT_COLOR = '#424242'
OCCLUDED_COLOR = '#000000'
OCCLUDED_BORDER_WIDTH = 3
@dataclasses.dataclass(frozen=True)
class Line:
"""Line in 2D or 3D space for visualization of keypoints.
It is a primitive for visualization purposes.
Attributes:
start: Start point of the line, an array with 2 or 3 elements.
end: End point of the line, an array with 2 or 3 elements.
color: a hex string representing an RGB color.
width: thickness of the line in units specific to a rendering function.
"""
start: np.ndarray
end: np.ndarray
color: str
width: float
@dataclasses.dataclass(frozen=True)
class Dot:
"""A small circle/sphere in 2D or 3D space for visualization of keypoints.
It is a primitive for visualization purposes.
Attributes:
location: Point in space, an array with 2 or 3 elements.
color: a hex string representing an RGB color.
size: diameter of the dot in units specific to a rendering function.
name: name of the keypoint.
border_color: optional color of the border, default is None.
actual_border_color: same as the `border_color` if it was set and same as
`color` otherwise.
"""
location: np.ndarray
color: str
size: float
name: str
border_color: Optional[str] = None
@property
def actual_border_color(self) -> str:
if self.border_color:
return self.border_color
return self.color
@dataclasses.dataclass(frozen=True)
class Wireframe:
"""Data required to render visual representation of keypoints.
Attributes:
lines: set of line segments between keypoints.
dots: set of points in keypoints locations.
"""
lines: Collection[Line]
dots: Collection[Dot]
class Edge(abc.ABC):
"""Base class for all wireframe edges."""
@abc.abstractmethod
def create_lines(self, point_by_type: _data.PointByType,
colors: ColorByType) -> Collection[Line]:
"""Creates all lines to visualize an edge.
Args:
point_by_type: a mapping between keypoint type and its coordinates.
colors: a mapping between keypoint type and its color.
Returns:
a list of lines representing the edge in the wireframe.
"""
@dataclasses.dataclass(frozen=True)
class SolidLineEdge(Edge):
"""Represents an edge between two keypoints a single line segment.
Attributes:
start: type of the start keypoint.
end: type of the end keypoint.
width: thickness of the line in units specific to a rendering function.
"""
start: 'KeypointType'
end: 'KeypointType'
width: float
def create_lines(self, point_by_type: _data.PointByType,
colors: Mapping['KeypointType', str]) -> Collection[Line]:
"""See base class."""
if self.start not in point_by_type or self.end not in point_by_type:
return []
color = colors.get(self.start, DEFAULT_COLOR)
return [
Line(point_by_type[self.start].location,
point_by_type[self.end].location, color, self.width)
]
def _bicolor_lines(start: np.ndarray, start_color: str, end: np.ndarray,
end_color: str, width: float) -> Collection[Line]:
middle = (start + end) / 2
start_half = Line(start, middle, start_color, width)
end_half = Line(middle, end, end_color, width)
return [start_half, end_half]
@dataclasses.dataclass(frozen=True)
class BicoloredEdge(SolidLineEdge):
"""Edge with two line segments colored according to keypoint type."""
def create_lines(self, point_by_type: _data.PointByType,
colors: Mapping['KeypointType', str]) -> Collection[Line]:
"""See base class."""
if self.start not in point_by_type or self.end not in point_by_type:
return []
start = np.array(point_by_type[self.start].location)
start_color = colors.get(self.start, DEFAULT_COLOR)
end = np.array(point_by_type[self.end].location)
end_color = colors.get(self.end, DEFAULT_COLOR)
return _bicolor_lines(start, start_color, end, end_color, self.width)
def _combine_colors(colors: Collection[str]) -> str:
if len(colors) == 1:
return next(iter(colors))
else:
return DEFAULT_COLOR
@dataclasses.dataclass(frozen=True)
class MultipointEdge(Edge):
"""An edge with start/end points computed by averaging input coordinates."""
start_avg: Collection['KeypointType']
end_avg: Collection['KeypointType']
width: float
def create_lines(self, point_by_type: _data.PointByType,
colors: Mapping['KeypointType', str]) -> Collection[Line]:
"""See base class."""
has_start = set(self.start_avg).issubset(point_by_type.keys())
has_end = set(self.end_avg).issubset(point_by_type.keys())
if not has_start or not has_end:
return []
start = np.mean([point_by_type[s].location for s in self.start_avg], axis=0)
start_color = _combine_colors([colors[s] for s in self.start_avg])
end = np.mean([point_by_type[e].location for e in self.end_avg], axis=0)
end_color = _combine_colors([colors[s] for s in self.end_avg])
return _bicolor_lines(start, start_color, end, end_color, self.width)
_COMMON_EDGES = (BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_ANKLE,
KeypointType.KEYPOINT_TYPE_LEFT_KNEE, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_KNEE,
KeypointType.KEYPOINT_TYPE_LEFT_HIP, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_HIP,
KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER,
KeypointType.KEYPOINT_TYPE_LEFT_ELBOW, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_ELBOW,
KeypointType.KEYPOINT_TYPE_LEFT_WRIST, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_RIGHT_ANKLE,
KeypointType.KEYPOINT_TYPE_RIGHT_KNEE, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_RIGHT_KNEE,
KeypointType.KEYPOINT_TYPE_RIGHT_HIP, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_RIGHT_HIP,
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER,
KeypointType.KEYPOINT_TYPE_RIGHT_ELBOW, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_RIGHT_ELBOW,
KeypointType.KEYPOINT_TYPE_RIGHT_WRIST, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_HIP,
KeypointType.KEYPOINT_TYPE_RIGHT_HIP, 2),
BicoloredEdge(KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER,
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER, 2))
_DEFAULT_CAMERA_EDGES = _COMMON_EDGES + (
BicoloredEdge(KeypointType.KEYPOINT_TYPE_NOSE,
KeypointType.KEYPOINT_TYPE_FOREHEAD, 1),
MultipointEdge([KeypointType.KEYPOINT_TYPE_FOREHEAD], [
KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER,
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER
], 1))
_DEFAULT_LASER_EDGES = _COMMON_EDGES + (
BicoloredEdge(KeypointType.KEYPOINT_TYPE_NOSE,
KeypointType.KEYPOINT_TYPE_HEAD_CENTER, 1),
MultipointEdge([KeypointType.KEYPOINT_TYPE_HEAD_CENTER], [
KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER,
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER
], 1))
_DEFAULT_POINT_COLORS = immutabledict.immutabledict({
KeypointType.KEYPOINT_TYPE_NOSE: '#00FF00',
KeypointType.KEYPOINT_TYPE_FOREHEAD: '#00FFFF',
KeypointType.KEYPOINT_TYPE_HEAD_CENTER: '#00FFFF',
KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER: '#FFA6FE',
KeypointType.KEYPOINT_TYPE_LEFT_ELBOW: '#FFE502',
KeypointType.KEYPOINT_TYPE_LEFT_WRIST: '#006401',
KeypointType.KEYPOINT_TYPE_LEFT_HIP: '#010067',
KeypointType.KEYPOINT_TYPE_LEFT_KNEE: '#95003A',
KeypointType.KEYPOINT_TYPE_LEFT_ANKLE: '#007DB5',
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER: '#774D00',
KeypointType.KEYPOINT_TYPE_RIGHT_ELBOW: '#90FB92',
KeypointType.KEYPOINT_TYPE_RIGHT_WRIST: '#0076FF',
KeypointType.KEYPOINT_TYPE_RIGHT_HIP: '#D5FF00',
KeypointType.KEYPOINT_TYPE_RIGHT_KNEE: '#A75740',
KeypointType.KEYPOINT_TYPE_RIGHT_ANKLE: '#6A826C'
})
# Keypoint sigmas from https://cocodataset.org/#keypoints-eval
_OKS_SCALES = immutabledict.immutabledict({
KeypointType.KEYPOINT_TYPE_NOSE: 0.052,
KeypointType.KEYPOINT_TYPE_LEFT_SHOULDER: 0.158,
KeypointType.KEYPOINT_TYPE_RIGHT_SHOULDER: 0.158,
KeypointType.KEYPOINT_TYPE_LEFT_ELBOW: 0.144,
KeypointType.KEYPOINT_TYPE_RIGHT_ELBOW: 0.144,
KeypointType.KEYPOINT_TYPE_LEFT_WRIST: 0.124,
KeypointType.KEYPOINT_TYPE_RIGHT_WRIST: 0.124,
KeypointType.KEYPOINT_TYPE_LEFT_HIP: 0.214,
KeypointType.KEYPOINT_TYPE_RIGHT_HIP: 0.214,
KeypointType.KEYPOINT_TYPE_LEFT_KNEE: 0.174,
KeypointType.KEYPOINT_TYPE_RIGHT_KNEE: 0.174,
KeypointType.KEYPOINT_TYPE_LEFT_ANKLE: 0.178,
KeypointType.KEYPOINT_TYPE_RIGHT_ANKLE: 0.178,
KeypointType.KEYPOINT_TYPE_FOREHEAD: 0.158,
KeypointType.KEYPOINT_TYPE_HEAD_CENTER: 0.158
})
def _default_point_sizes():
max_scale = max(s for s in _OKS_SCALES.values())
return {n: s / max_scale for n, s in _OKS_SCALES.items()}
@dataclasses.dataclass(frozen=True)
class WireframeConfig:
"""Settings to build a wireframe out of a set of keypoints.
Attributes:
edges: types of keypoints to connect with line segments.
point_colors: colors of keypoint types for corresponding dots and line
segments.
point_sizes: nominal sizes of dots.
dot_size_factor: a factor used to compute actual sizes of dots using
`point_sizes`.
line_width_factor: a factor used to compute actual width of lines using
configured edge widths.
"""
edges: Collection[BicoloredEdge]
point_colors: Mapping['KeypointType', str]
point_sizes: Mapping['KeypointType', float]
dot_size_factor: float = 1.0
line_width_factor: float = 1.0
DEFAULT_CAMERA_WIREFRAME_CONFIG = WireframeConfig(
edges=_DEFAULT_CAMERA_EDGES,
point_colors=_DEFAULT_POINT_COLORS,
point_sizes=_default_point_sizes(),
dot_size_factor=6,
line_width_factor=1)
DEFAULT_LASER_WIREFRAME_CONFIG = WireframeConfig(
edges=_DEFAULT_LASER_EDGES,
point_colors=_DEFAULT_POINT_COLORS,
point_sizes=_default_point_sizes(),
dot_size_factor=15,
line_width_factor=4)
def _removeprefix(x, y):
if x.startswith(y):
return x[len(y):]
return x
def point_name(point_type: 'KeypointType') -> str:
"""Returns a short name of the keypoint type."""
name = KeypointType.Name(point_type)
return _removeprefix(name, 'KEYPOINT_TYPE_')
def _build_wireframe(point_by_type: _data.PointByType,
config: WireframeConfig) -> Wireframe:
"""Creates a wireframe for a collection of keypoint coordinates."""
lines = []
for e in config.edges:
e = dataclasses.replace(e, width=e.width * config.line_width_factor)
lines.extend(e.create_lines(point_by_type, config.point_colors))
dots = []
for point_type, point in point_by_type.items():
border_color = OCCLUDED_COLOR if point.is_occluded else None
occlusion_marker = '?' if point.is_occluded else ''
dots.append(
Dot(point.location,
config.point_colors[point_type],
config.point_sizes[point_type] * config.dot_size_factor,
name=f'{point_name(point_type)}{occlusion_marker}',
border_color=border_color))
return Wireframe(lines=lines, dots=dots)
def build_camera_wireframe(
keypoints: Collection[keypoint_pb2.CameraKeypoint],
config: WireframeConfig = DEFAULT_CAMERA_WIREFRAME_CONFIG) -> Wireframe:
"""Creates a wireframe for camera keypoints."""
point_by_type = _data.camera_keypoint_coordinates(keypoints)
return _build_wireframe(point_by_type, config)
def build_laser_wireframe(
keypoints: Collection[keypoint_pb2.LaserKeypoint],
config: WireframeConfig = DEFAULT_LASER_WIREFRAME_CONFIG) -> Wireframe:
"""Creates a wireframe for laser keypoints."""
point_by_type = _data.laser_keypoint_coordinates(keypoints)
return _build_wireframe(point_by_type, config)
def draw_camera_wireframe(ax: plt.Axes, wireframe: Wireframe) -> None:
"""Draws a camera wireframe onto the axes."""
ax.add_collection(
mc.LineCollection(
segments=[[l.start, l.end] for l in wireframe.lines],
colors=[l.color for l in wireframe.lines],
linewidths=[l.width for l in wireframe.lines],
antialiased=True))
dots_sizes = [d.size for d in wireframe.dots]
ax.add_collection(
mc.EllipseCollection(
widths=dots_sizes,
heights=dots_sizes,
angles=0,
units='xy',
offsets=[d.location for d in wireframe.dots],
facecolors=[d.color for d in wireframe.dots],
edgecolors=[d.actual_border_color for d in wireframe.dots],
linewidth=OCCLUDED_BORDER_WIDTH,
transOffset=ax.transData,
antialiased=True))
def draw_laser_wireframe(fig: go.Figure, wireframe: Wireframe) -> None:
"""Draws a laser wireframe onto the plotly Figure."""
for line in wireframe.lines:
points = np.stack([line.start, line.end], axis=0)
fig.add_trace(
go.Scatter3d(
mode='lines',
x=points[:, 0],
y=points[:, 1],
z=points[:, 2],
line=dict(color=line.color, width=line.width)))
dot_coords = np.stack([d.location for d in wireframe.dots], axis=0)
fig.add_trace(
go.Scatter3d(
text=[d.name for d in wireframe.dots],
mode='markers',
x=dot_coords[:, 0],
y=dot_coords[:, 1],
z=dot_coords[:, 2],
marker=dict(
color=[d.color for d in wireframe.dots],
size=[d.size for d in wireframe.dots],
line=dict(
width=OCCLUDED_BORDER_WIDTH,
color=[d.actual_border_color for d in wireframe.dots]))))
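# Usage sketch (assumed inputs: `camera_image`, an HxWx3 array, and `camera_keypoints`,
# a collection of keypoint_pb2.CameraKeypoint; neither is defined in this module):
#
#   wireframe = build_camera_wireframe(camera_keypoints)
#   fig, ax = plt.subplots()
#   ax.imshow(camera_image)
#   draw_camera_wireframe(ax, wireframe)
#
# The laser case is analogous: build_laser_wireframe(...) followed by
# draw_laser_wireframe(go.Figure(), ...).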
def crop_camera_keypoints(
image: np.ndarray,
keypoints: Collection[keypoint_pb2.CameraKeypoint],
box: label_pb2.Label.Box,
margin: float = 0
) -> Tuple[np.ndarray, Collection[keypoint_pb2.CameraKeypoint]]:
"""Crops camera image to the specified bounding box and shifts keypoints.
Args:
image: input image to crop, an array with shape [height, width, 3].
keypoints: a collection of camera keypoints.
box: a 2D bounding box to extract from the input image.
margin: a ratio of the extra margin to add to the image relative to the
input image size.
Returns:
a tuple (new image, shifted keypoints).
"""
new_camera_keypoints = []
crop_width = (1 + margin) * box.length
crop_height = (1 + margin) * box.width
min_x = max(0, int(box.center_x - crop_width /
self.osid_object.genus_type == NEW_TYPE
with pytest.raises(errors.IllegalState):
self.catalog.create_assessment_part_for_assessment_part(self.form)
with pytest.raises(errors.InvalidArgument):
self.catalog.create_assessment_part_for_assessment_part('I Will Break You!')
update_form = self.catalog.get_assessment_part_form_for_update(self.osid_object.ident)
with pytest.raises(errors.InvalidArgument):
self.catalog.create_assessment_part_for_assessment_part(update_form)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.create_assessment_part_for_assessment_part('foo')
def test_can_update_assessment_parts(self):
"""Tests can_update_assessment_parts"""
# From test_templates/resource.py::ResourceAdminSession::can_update_resources_template
assert isinstance(self.catalog.can_update_assessment_parts(), bool)
def test_get_assessment_part_form_for_update(self):
"""Tests get_assessment_part_form_for_update"""
# From test_templates/resource.py::ResourceAdminSession::get_resource_form_for_update_template
if not is_never_authz(self.service_config):
form = self.catalog.get_assessment_part_form_for_update(self.osid_object.ident)
assert isinstance(form, OsidForm)
assert form.is_for_update()
with pytest.raises(errors.InvalidArgument):
self.catalog.get_assessment_part_form_for_update(['This is Doomed!'])
with pytest.raises(errors.InvalidArgument):
self.catalog.get_assessment_part_form_for_update(
Id(authority='Respect my Authoritay!',
namespace='assessment.authoring.{object_name}',
identifier='1'))
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_assessment_part_form_for_update(self.fake_id)
def test_update_assessment_part(self):
"""Tests update_assessment_part"""
if not is_never_authz(self.service_config):
form = self.catalog.get_assessment_part_form_for_update(self.osid_object.ident)
form.display_name = 'new name'
form.description = 'new description'
form.set_genus_type(NEW_TYPE_2)
updated_object = self.catalog.update_assessment_part(self.osid_object.ident, form)
assert isinstance(updated_object, ABCObjects.AssessmentPart)
assert updated_object.ident == self.osid_object.ident
assert updated_object.display_name.text == 'new name'
assert updated_object.description.text == 'new description'
assert updated_object.genus_type == NEW_TYPE_2
else:
with pytest.raises(errors.PermissionDenied):
self.session.update_assessment_part(self.fake_id, 'foo')
def test_can_delete_assessment_parts(self):
"""Tests can_delete_assessment_parts"""
# From test_templates/resource.py::ResourceAdminSession::can_delete_resources_template
assert isinstance(self.catalog.can_delete_assessment_parts(), bool)
def test_delete_assessment_part(self):
"""Tests delete_assessment_part"""
if not is_never_authz(self.service_config):
results = self.catalog.get_assessment_parts()
assert results.available() == 1
form = self.catalog.get_assessment_part_form_for_create_for_assessment(self.assessment.ident,
[])
form.display_name = 'new AssessmentPart'
form.description = 'description of AssessmentPart'
new_assessment_part = self.catalog.create_assessment_part_for_assessment(form)
results = self.catalog.get_assessment_parts()
assert results.available() == 2
self.session.delete_assessment_part(new_assessment_part.ident)
results = self.catalog.get_assessment_parts()
assert results.available() == 1
assert str(results.next().ident) != str(new_assessment_part.ident)
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.delete_assessment_part(self.fake_id)
def test_can_manage_assessment_part_aliases(self):
"""Tests can_manage_assessment_part_aliases"""
# From test_templates/resource.py::ResourceAdminSession::can_manage_resource_aliases_template
assert isinstance(self.catalog.can_manage_assessment_part_aliases(), bool)
def test_alias_assessment_part(self):
"""Tests alias_assessment_part"""
# From test_templates/resource.py::ResourceAdminSession::alias_resource_template
if not is_never_authz(self.service_config):
alias_id = Id(self.catalog.ident.namespace + '%3Amy-alias%40ODL.MIT.EDU')
self.catalog.alias_assessment_part(self.osid_object.ident, alias_id)
aliased_object = self.catalog.get_assessment_part(alias_id)
assert aliased_object.ident == self.osid_object.ident
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.alias_assessment_part(self.fake_id, self.fake_id)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def assessment_part_bank_session_class_fixture(request):
request.cls.service_config = request.param
request.cls.assessment_part_list = list()
request.cls.assessment_part_ids = list()
request.cls.svc_mgr = Runtime().get_service_manager(
'ASSESSMENT',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_bank_form_for_create([])
create_form.display_name = 'Test Bank'
create_form.description = 'Test Bank for AssessmentPartBankSession tests'
request.cls.catalog = request.cls.svc_mgr.create_bank(create_form)
create_form = request.cls.svc_mgr.get_bank_form_for_create([])
create_form.display_name = 'Test Bank for Assignment'
create_form.description = 'Test Bank for AssessmentPartBankSession tests assignment'
request.cls.assigned_catalog = request.cls.svc_mgr.create_bank(create_form)
assessment_form = request.cls.catalog.get_assessment_form_for_create([])
assessment_form.display_name = 'Test Assessment'
assessment_form.description = 'Test Assessment for AssessmentPartBankSession tests'
request.cls.assessment = request.cls.catalog.create_assessment(assessment_form)
for num in [0, 1, 2]:
create_form = request.cls.catalog.get_assessment_part_form_for_create_for_assessment(request.cls.assessment.ident, [])
create_form.display_name = 'Test AssessmentPart ' + str(num)
create_form.description = 'Test AssessmentPart for AssessmentPartBankSession tests'
obj = request.cls.catalog.create_assessment_part_for_assessment(create_form)
request.cls.assessment_part_list.append(obj)
request.cls.assessment_part_ids.append(obj.ident)
request.cls.svc_mgr.assign_assessment_part_to_bank(
request.cls.assessment_part_ids[1], request.cls.assigned_catalog.ident)
request.cls.svc_mgr.assign_assessment_part_to_bank(
request.cls.assessment_part_ids[2], request.cls.assigned_catalog.ident)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
request.cls.svc_mgr.unassign_assessment_part_from_bank(
request.cls.assessment_part_ids[1], request.cls.assigned_catalog.ident)
request.cls.svc_mgr.unassign_assessment_part_from_bank(
request.cls.assessment_part_ids[2], request.cls.assigned_catalog.ident)
for obj in request.cls.catalog.get_assessment_parts():
request.cls.catalog.delete_assessment_part(obj.ident)
request.cls.catalog.delete_assessment(request.cls.assessment.ident)
request.cls.svc_mgr.delete_bank(request.cls.assigned_catalog.ident)
request.cls.svc_mgr.delete_bank(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
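# The class fixture above creates three assessment parts in `catalog` and additionally
# assigns parts 1 and 2 to `assigned_catalog`; the mapping tests below rely on those counts.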
@pytest.fixture(scope="function")
def assessment_part_bank_session_test_fixture(request):
request.cls.session = request.cls.svc_mgr
@pytest.mark.usefixtures("assessment_part_bank_session_class_fixture", "assessment_part_bank_session_test_fixture")
class TestAssessmentPartBankSession(object):
"""Tests for AssessmentPartBankSession"""
def test_can_lookup_assessment_part_bank_mappings(self):
"""Tests can_lookup_assessment_part_bank_mappings"""
# From test_templates/resource.py::ResourceBinSession::can_lookup_resource_bin_mappings
result = self.session.can_lookup_assessment_part_bank_mappings()
assert isinstance(result, bool)
def test_use_comparative_assessment_part_bank_view(self):
"""Tests use_comparative_assessment_part_bank_view"""
# From test_templates/resource.py::BinLookupSession::use_comparative_bin_view_template
self.svc_mgr.use_comparative_assessment_part_bank_view()
def test_use_plenary_assessment_part_bank_view(self):
"""Tests use_plenary_assessment_part_bank_view"""
# From test_templates/resource.py::BinLookupSession::use_plenary_bin_view_template
self.svc_mgr.use_plenary_assessment_part_bank_view()
def test_get_assessment_part_ids_by_bank(self):
"""Tests get_assessment_part_ids_by_bank"""
# From test_templates/resource.py::ResourceBinSession::get_resource_ids_by_bin_template
if not is_never_authz(self.service_config):
objects = self.svc_mgr.get_assessment_part_ids_by_bank(self.assigned_catalog.ident)
assert objects.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.svc_mgr.get_assessment_part_ids_by_bank(self.fake_id)
def test_get_assessment_parts_by_bank(self):
"""Tests get_assessment_parts_by_bank"""
# From test_templates/resource.py::ResourceBinSession::get_resources_by_bin_template
if not is_never_authz(self.service_config):
results = self.session.get_assessment_parts_by_bank(self.assigned_catalog.ident)
assert isinstance(results, ABCObjects.AssessmentPartList)
assert results.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_assessment_parts_by_bank(self.fake_id)
def test_get_assessment_part_ids_by_banks(self):
"""Tests get_assessment_part_ids_by_banks"""
# From test_templates/resource.py::ResourceBinSession::get_resource_ids_by_bins_template
if not is_never_authz(self.service_config):
catalog_ids = [self.catalog.ident, self.assigned_catalog.ident]
object_ids = self.session.get_assessment_part_ids_by_banks(catalog_ids)
assert isinstance(object_ids, IdList)
# Currently our impl does not remove duplicate objectIds
assert object_ids.available() == 5
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_assessment_part_ids_by_banks([self.fake_id])
def test_get_assessment_parts_by_banks(self):
"""Tests get_assessment_parts_by_banks"""
# From test_templates/resource.py::ResourceBinSession::get_resources_by_bins_template
if not is_never_authz(self.service_config):
catalog_ids = [self.catalog.ident, self.assigned_catalog.ident]
results = self.session.get_assessment_parts_by_banks(catalog_ids)
assert isinstance(results, ABCObjects.AssessmentPartList)
# Currently our impl does not remove duplicate objects
assert results.available() == 5
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_assessment_parts_by_banks([self.fake_id])
def test_get_bank_ids_by_assessment_part(self):
"""Tests get_bank_ids_by_assessment_part"""
# From test_templates/resource.py::ResourceBinSession::get_bin_ids_by_resource_template
if not is_never_authz(self.service_config):
cats = self.svc_mgr.get_bank_ids_by_assessment_part(self.assessment_part_ids[1])
assert cats.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.svc_mgr.get_bank_ids_by_assessment_part(self.fake_id)
def test_get_banks_by_assessment_part(self):
"""Tests get_banks_by_assessment_part"""
# From test_templates/resource.py::ResourceBinSession::get_bins_by_resource_template
if not is_never_authz(self.service_config):
cats = self.svc_mgr.get_banks_by_assessment_part(self.assessment_part_ids[1])
assert cats.available() == 2
else:
with pytest.raises(errors.PermissionDenied):
self.svc_mgr.get_banks_by_assessment_part(self.fake_id)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def assessment_part_bank_assignment_session_class_fixture(request):
request.cls.service_config = request.param
request.cls.assessment_part_list = list()
request.cls.assessment_part_ids = list()
request.cls.svc_mgr = Runtime().get_service_manager(
'ASSESSMENT',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_bank_form_for_create([])
create_form.display_name = 'Test Bank'
create_form.description = 'Test Bank for AssessmentPartBankAssignmentSession tests'
request.cls.catalog = request.cls.svc_mgr.create_bank(create_form)
create_form = request.cls.svc_mgr.get_bank_form_for_create([])
create_form.display_name = 'Test Bank for Assignment'
create_form.description = 'Test Bank for AssessmentPartBankAssignmentSession tests assignment'
request.cls.assigned_catalog = request.cls.svc_mgr.create_bank(create_form)
assessment_form = request.cls.catalog.get_assessment_form_for_create([])
assessment_form.display_name = 'Test Assessment'
assessment_form.description = 'Test Assessment for AssessmentPartBankAssignmentSession tests'
request.cls.assessment = request.cls.catalog.create_assessment(assessment_form)
for num in [0, 1, 2]:
create_form = request.cls.catalog.get_assessment_part_form_for_create_for_assessment(request.cls.assessment.ident, [])
create_form.display_name = 'Test AssessmentPart ' + str(num)
create_form.description = 'Test AssessmentPart for AssessmentPartBankAssignmentSession tests'
obj = request.cls.catalog.create_assessment_part_for_assessment(create_form)
request.cls.assessment_part_list.append(obj)
request.cls.assessment_part_ids.append(obj.ident)
def class_tear_down():
if not is_never_authz(request.cls.service_config):
for obj in request.cls.catalog.get_assessment_parts():
request.cls.catalog.delete_assessment_part(obj.ident)
request.cls.catalog.delete_assessment(request.cls.assessment.ident)
request.cls.svc_mgr.delete_bank(request.cls.assigned_catalog.ident)
request.cls.svc_mgr.delete_bank(request.cls.catalog.ident)
request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def assessment_part_bank_assignment_session_test_fixture(request):
request.cls.session = request.cls.svc_mgr
@pytest.mark.usefixtures("assessment_part_bank_assignment_session_class_fixture", "assessment_part_bank_assignment_session_test_fixture")
class TestAssessmentPartBankAssignmentSession(object):
"""Tests for AssessmentPartBankAssignmentSession"""
def test_can_assign_assessment_parts(self):
"""Tests can_assign_assessment_parts"""
# From test_templates/resource.py::ResourceBinAssignmentSession::can_assign_resources_template
result = self.session.can_assign_assessment_parts()
assert isinstance(result, bool)
def test_can_assign_assessment_parts_to_bank(self):
"""Tests can_assign_assessment_parts_to_bank"""
# From test_templates/resource.py::ResourceBinAssignmentSession::can_assign_resources_to_bin_template
result = self.session.can_assign_assessment_parts_to_bank(self.assigned_catalog.ident)
assert isinstance(result, bool)
def test_get_assignable_bank_ids(self):
"""Tests get_assignable_bank_ids"""
# From test_templates/resource.py::ResourceBinAssignmentSession::get_assignable_bin_ids_template
# Note that our implementation just returns all catalogIds, which does not follow
# the OSID spec (should return only the catalogIds below the given one in the hierarchy).
if not is_never_authz(self.service_config):
results = self.session.get_assignable_bank_ids(self.catalog.ident)
assert isinstance(results, IdList)
# Because we're not deleting all banks from all tests, we might
# have some crufty banks here...but there should be at least 2.
assert results.available() >= 2
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_assignable_bank_ids(self.fake_id)
def test_get_assignable_bank_ids_for_assessment_part(self):
"""Tests get_assignable_bank_ids_for_assessment_part"""
# From test_templates/resource.py::ResourceBinAssignmentSession::get_assignable_bin_ids_for_resource_template
# Note that our implementation just returns all catalogIds, which does not follow
# the OSID spec (should return only the catalogIds below the given one in the hierarchy).
if not is_never_authz(self.service_config):
results = self.session.get_assignable_bank_ids_for_assessment_part(self.catalog.ident, self.assessment_part_ids[0])
assert isinstance(results, IdList)
# Because we're not deleting all banks from all tests, we might
# have some crufty banks here...but there should be at least 2.
assert results.available() >= 2
else:
with pytest.raises(errors.PermissionDenied):
self.session.get_assignable_bank_ids_for_assessment_part(self.fake_id, self.fake_id)
def test_assign_assessment_part_to_bank(self):
"""Tests assign_assessment_part_to_bank"""
# From test_templates/resource.py::ResourceBinAssignmentSession::assign_resource_to_bin_template
if not is_never_authz(self.service_config):
results = self.assigned_catalog.get_assessment_parts()
assert results.available() == 0
self.session.assign_assessment_part_to_bank(self.assessment_part_ids[1], self.assigned_catalog.ident)
results = self.assigned_catalog.get_assessment_parts()
assert results.available() == 1
self.session.unassign_assessment_part_from_bank(
self.assessment_part_ids[1],
self.assigned_catalog.ident)
else:
with pytest.raises(errors.PermissionDenied):
self.session.assign_assessment_part_to_bank(self.fake_id, self.fake_id)
def test_unassign_assessment_part_from_bank(self):
"""Tests unassign_assessment_part_from_bank"""
# From test_templates/resource.py::ResourceBinAssignmentSession::unassign_resource_from_bin_template
if not is_never_authz(self.service_config):
results = self.assigned_catalog.get_assessment_parts()
assert results.available() == 0
self.session.assign_assessment_part_to_bank(
self.assessment_part_ids[1],
self.assigned_catalog.ident)
results = self.assigned_catalog.get_assessment_parts()
assert results.available() == 1
self.session.unassign_assessment_part_from_bank(
self.assessment_part_ids[1],
self.assigned_catalog.ident)
results = self.assigned_catalog.get_assessment_parts()
assert results.available() == 0
else:
with pytest.raises(errors.PermissionDenied):
self.session.unassign_assessment_part_from_bank(self.fake_id, self.fake_id)
def test_reassign_assessment_part_to_bank(self):
"""Tests reassign_assessment_part_to_bank"""
if is_never_authz(self.service_config):
pass # no object to call the method on?
elif uses_cataloging(self.service_config):
pass # cannot call the _get_record() methods on catalogs
else:
with pytest.raises(errors.Unimplemented):
self.session.reassign_assessment_part_to_bank(True, True, True)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def assessment_part_item_session_class_fixture(request):
request.cls.service_config = request.param
request.cls.svc_mgr = Runtime().get_service_manager(
'ASSESSMENT_AUTHORING',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
@pytest.fixture(scope="function")
def assessment_part_item_session_test_fixture(request):
request.cls.item_list = list()
request.cls.item_ids = list()
if not is_never_authz(request.cls.service_config):
create_form = request.cls.svc_mgr.get_bank_form_for_create([])
create_form.display_name = 'Test Bank'
create_form.description = 'Test Bank for AssessmentPartItemSession tests'
request.cls.catalog = request.cls.svc_mgr.create_bank(create_form)
create_form = request.cls.catalog.get_assessment_form_for_create([])
create_form.display_name = 'Test Assessment'
create_form.description = 'Test Assessment for AssessmentPartItemSession tests'
request.cls.assessment = request.cls.catalog.create_assessment(create_form)
create_form = request.cls.catalog.get_assessment_part_form_for_create_for_assessment(request.cls.assessment.ident, [])
create_form.display_name = 'Test Assessment Part'
create_form.description = 'Test Assessment Part for AssessmentPartItemSession tests'
request.cls.assessment_part = request.cls.catalog.create_assessment_part_for_assessment(create_form)
for num in [0, 1, 2, 3]:
create_form = request.cls.catalog.get_item_form_for_create([])
create_form.display_name = 'Test Item ' + str(num)
create_form.description = 'Test Item for AssessmentPartItemSession tests'
obj = request.cls.catalog.create_item(create_form)
request.cls.item_list.append(obj)
request.cls.item_ids.append(obj.ident)
request.cls.catalog.add_item(obj.ident, request.cls.assessment_part.ident)
else:
request.cls.catalog = request.cls.svc_mgr.get_assessment_part_item_session(proxy=PROXY)
request.cls.session = request.cls.catalog
def test_tear_down():
if not is_never_authz(request.cls.service_config):
for catalog in request.cls.svc_mgr.get_banks():
for obj in catalog.get_assessment_parts():
if obj.has_children():
for child_id in obj.get_child_assessment_part_ids():
catalog.delete_assessment_part(child_id)
catalog.delete_assessment_part(obj.ident)
for obj in catalog.get_assessments():
catalog.delete_assessment(obj.ident)
for obj in catalog.get_items():
catalog.delete_item(obj.ident)
request.cls.svc_mgr.delete_bank(catalog.ident)
request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("assessment_part_item_session_class_fixture", "assessment_part_item_session_test_fixture")
class TestAssessmentPartItemSession(object):
"""Tests for AssessmentPartItemSession"""
def test_get_bank_id(self):
"""Tests get_bank_id"""
# From test_templates/resource.py ResourceLookupSession.get_bin_id_template
if not is_never_authz(self.service_config):
assert self.catalog.get_bank_id() == self.catalog.ident
def test_get_bank(self):
"""Tests get_bank"""
# is this test really needed?
# From test_templates/resource.py::ResourceLookupSession::get_bin_template
if not is_never_authz(self.service_config):
assert isinstance(self.catalog.get_bank(), ABCBank)
def test_can_access_assessment_part_items(self):
"""Tests can_access_assessment_part_items"""
assert isinstance(self.session.can_access_assessment_part_items(), bool)
def test_use_comparative_asseessment_part_item_view(self):
"""Tests use_comparative_asseessment_part_item_view"""
# From test_templates/resource.py ResourceLookupSession.use_comparative_resource_view_template
self.catalog.use_comparative_asseessment_part_item_view()
def test_use_plenary_assessment_part_item_view(self):
"""Tests use_plenary_assessment_part_item_view"""
# From test_templates/resource.py ResourceLookupSession.use_plenary_resource_view_template
self.catalog.use_plenary_assessment_part_item_view()
def test_use_federated_bank_view(self):
"""Tests use_federated_bank_view"""
# From test_templates/resource.py ResourceLookupSession.use_federated_bin_view_template
self.catalog.use_federated_bank_view()
def test_use_isolated_bank_view(self):
"""Tests use_isolated_bank_view"""
# From test_templates/resource.py ResourceLookupSession.use_isolated_bin_view_template
self.catalog.use_isolated_bank_view()
def test_get_assessment_part_items(self):
"""Tests get_assessment_part_items"""
# From test_templates/repository.py::AssetCompositionSession::get_composition_assets_template
if not is_never_authz(self.service_config):
assert self.catalog.get_assessment_part_items(self.assessment_part.ident).available() == 4
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_assessment_part_items(self.fake_id)
def test_get_assessment_parts_by_item(self):
"""Tests get_assessment_parts_by_item"""
# From test_templates/repository.py::AssetCompositionSession::get_compositions_by_asset_template
if not is_never_authz(self.service_config):
assert self.catalog.get_assessment_parts_by_item(self.item_ids[0]).available() == 1
assert self.catalog.get_assessment_parts_by_item(self.item_ids[0]).next().ident == self.assessment_part.ident
else:
with pytest.raises(errors.PermissionDenied):
self.catalog.get_assessment_parts_by_item(self.fake_id)
@pytest.fixture(scope="class",
params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def assessment_part_item_design_session_class_fixture(request):
request.cls.service_config = request.param
request.cls.item_list = list()
request.cls.item_ids = list()
request.cls.svc_mgr = Runtime().get_service_manager(
'ASSESSMENT_AUTHORING',
proxy=PROXY,
implementation=request.cls.service_config)
request.cls.fake_id = Id('resource.Resource%3Afake%40DLKIT.MIT.EDU')
if | |
import numpy as np
import os
import pytest
import sys
from numpy import testing
from textwrap import dedent
from moflow.mf.name import Modflow
from moflow.mf.base import MFPackage
from moflow.mf.reader import MFFileReader
if sys.version_info[0] < 3:
from io import BytesIO
StringIO = BytesIO
else:
from io import StringIO, BytesIO
class ExamplePackage(MFPackage):
par1 = None
par2 = None
def test_mf_reader_basics():
p = ExamplePackage()
f = StringIO(dedent('''\
# A comment
100 ignore
200 -2.4E-12
4 500 FREE
-44 888.0
last line
'''))
r = MFFileReader(f, p)
assert r.not_eof
assert r.lineno == 0
assert len(r) == 6
# 0: Text
r.read_text()
assert r.lineno == 1
assert p.text == ['A comment']
# 1: an item
assert r.get_items(1, 1, 'i') == [100]
assert r.lineno == 2
# 2: two named items
with pytest.raises(ValueError):
r.read_named_items(2, ('par1', 'par2'), 'i')
r.lineno -= 1 # manually scroll back 1 line and try again
r.read_named_items(2, ('par1', 'par2'), 'f')
assert p.par1 == 200.0
testing.assert_almost_equal(p.par2, -2.4E-12)
# 3: three named items
items = r.get_named_items(3, ['a', 'b'], 'f')
assert items == {'a': 4.0, 'b': 500.0}
# 4: two named items
r.read_named_items(4, ['par1', 'par2'], 'f')
assert p.par1 == -44.0
testing.assert_almost_equal(p.par2, 888.0)
# post-Data Set
assert r.not_eof
assert r.nextline() == 'last line\n'
assert r.lineno == 6
assert not r.not_eof
# Try to read past EOF
with pytest.raises(IndexError):
r.nextline()
assert r.lineno == 6
def test_mf_reader_empty():
p = ExamplePackage()
f = StringIO('# Empty file')
r = MFFileReader(f, p)
assert r.not_eof
assert r.lineno == 0
assert len(r) == 1
# Item 0: Text
r.read_text()
assert r.lineno == 1
assert p.text == ['Empty file']
assert not r.not_eof
def test_mf_read_free_arrays():
# Examples from page 8-59 of TM6A16_MF2005
m = Modflow()
p = ExamplePackage()
m.append(p)
f = StringIO(dedent('''\
CONSTANT 5.7 This sets an entire array to the value "5.7".
INTERNAL 1.0 (7F4.0) 3 This reads the array values from the ...
1.2 3.7 9.3 4.2 2.2 9.9 1.0
3.3 4.9 7.3 7.5 8.2 8.7 6.6
4.5 5.7 2.2 1.1 1.7 6.7 6.9
7.4 3.5 7.8 8.5 7.4 6.8 8.8
EXTERNAL 52 1.0 (7F4.0) 3 This reads the array from the formatted..
EXTERNAL 47 1.0 (BINARY) 3 This reads the array from the binary ...
OPEN/CLOSE test.dat 1.0 (7F4.0) 3 This reads the array from the ...
'''))
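# The control records above follow MODFLOW's free-format array conventions (see
# TM6A16, p. 8-59): CONSTANT fills the array with one value, INTERNAL reads it from
# the following lines, EXTERNAL reads it from another unit (ASCII or BINARY), and
# OPEN/CLOSE reads it from a named file.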
# Prepare ASCII data for unit 52, and for test.dat
d2_str = (
' 1.2 3.7 9.3 4.2 2.2 9.9 1.0\n'
' 3.3 4.9 7.3 7.5 8.2 8.7 6.6\n'
' 4.5 5.7 2.2 1.1 1.7 6.7 6.9\n'
' 7.4 3.5 7.8 8.5 7.4 6.8 8.8\n'
)
m[52] = StringIO(d2_str)
# Prepare binary data for unit 47
d2_expected = np.array(
[[1.2, 3.7, 9.3, 4.2, 2.2, 9.9, 1.0],
[3.3, 4.9, 7.3, 7.5, 8.2, 8.7, 6.6],
[4.5, 5.7, 2.2, 1.1, 1.7, 6.7, 6.9],
[7.4, 3.5, 7.8, 8.5, 7.4, 6.8, 8.8]], 'f')
m[47] = BytesIO(d2_expected.tostring())
r = MFFileReader(f, p)
# Data Number 1: Read constant 4x5 array
d1_shape = (4, 7)
d1_expected = np.ones(d1_shape, 'f') * 5.7
d1 = r.get_array(1, d1_shape, 'f', return_dict=True)
assert not hasattr(d1, 'locat')
assert d1['cntrl'] == 'CONSTANT'
assert d1['cnstnt'] == '5.7'
assert d1['text'] == 'This sets an entire array to the value "5.7".'
testing.assert_array_equal(d1['array'], d1_expected)
assert r.lineno == 1
# Data Number 2: Read internal 4x7 array
d2_shape = (4, 7)
d2 = r.get_array(2, d2_shape, 'f', return_dict=True)
assert d2['cntrl'] == 'INTERNAL'
assert d2['cnstnt'] == '1.0'
assert d2['fmtin'] == '(7F4.0)'
assert d2['iprn'] == '3'
assert d2['text'] == 'This reads the array values from the ...'
testing.assert_array_equal(d2['array'], d2_expected)
assert r.lineno == 6
# Data Number 3: EXTERNAL ASCII
d3 = r.get_array(3, d2_shape, 'f', return_dict=True)
assert d3['cntrl'] == 'EXTERNAL'
assert d3['nunit'] == 52
assert d3['cnstnt'] == '1.0'
assert d3['fmtin'] == '(7F4.0)'
assert d3['iprn'] == '3'
testing.assert_array_equal(d3['array'], d2_expected)
assert r.lineno == 7
# Data Number 4: EXTERNAL BINARY
d4 = r.get_array(4, d2_shape, 'f', return_dict=True)
assert d4['cntrl'] == 'EXTERNAL'
assert d4['nunit'] == 47
assert d4['cnstnt'] == '1.0'
assert d4['fmtin'] == '(BINARY)'
assert d4['iprn'] == '3'
testing.assert_array_equal(d4['array'], d2_expected)
assert r.lineno == 8
# Data Number 5: OPEN/CLOSE test.dat
d5_fname = 'test.dat'
with open(d5_fname, 'w') as fp:
fp.write(d2_str)
d5 = r.get_array(5, d2_shape, 'f', return_dict=True)
os.unlink(d5_fname)
assert d5['cntrl'] == 'OPEN/CLOSE'
assert d5['fname'] == d5_fname
assert d5['cnstnt'] == '1.0'
assert d5['fmtin'] == '(7F4.0)'
assert d5['iprn'] == '3'
testing.assert_array_equal(d5['array'], d2_expected)
assert r.lineno == 9
assert not r.not_eof
def test_mf_read_fixed_arrays():
m = Modflow()
p = ExamplePackage()
p.nunit = 11
m.append(p)
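# Fixed-format control records: each header line is LOCAT CNSTNT FMTIN IPRN, where
# LOCAT > 0 reads a formatted array from that unit, LOCAT < 0 appears to read a
# binary array, and LOCAT == 0 fills the whole array with CNSTNT.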
f = StringIO('''\
11 1. (15f4.0) 7 WETDRY-1
2. 2. 2. 2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. 2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. -2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. 2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. 2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. -2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
2. 2. 2. 2. 2. 2. 2. 2. -2. -2. -2. -2. -2. -2. -2.
11 1(12I2) 3
1 1 1 1 1 1 1 1 1 1 1 1
1 1 9 1 1 1 1 1 1 1 1 1
11 1(13I3) IBOUND L1
-1 1 1 1 1 1 1 1 1 1 1 1 -1
11 1(13I3) IBOUND L2
-1 1 1 1 1 1 1 1 1 1 1 1 -1
0 6. (15f4.0) 7 # Some text
0 8(12I2) 3
-17 1. (binary) 7
16 1(24I3) 3
16 1(24I3) 3
0 145.
''')
# Prepare two ASCII data sets for unit 16
m[16] = StringIO('''\
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0
01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-20_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
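# Each entry below describes one run for this subject: an ID, scan type and condition
# metadata, the session number, and the path to the corresponding EyeLink .edf file.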
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-20_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-21':
# subject information
initials = 'sub-21'
firstName = 'sub-21'
standardFSID = 'sub-21_220617'
birthdate = datetime.date( 1900, 01, 01 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 29)
sj_session1 = 'sub-21_290617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-21_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-05':
# subject information
initials = 'sub-05'
firstName = 'sub-05'
standardFSID = 'sub-05_180717'
birthdate = datetime.date( 1900, 1, 1 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 7, 18)
sj_session1 = 'sub-05_180717'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-05_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-22':
# subject information
initials = 'sub-22'
firstName = 'sub-22'
standardFSID = 'sub-22_220611'
birthdate = datetime.date( 1900, 1, 1 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 6, 22)
sj_session1 = 'sub-22_220617'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r7.edf' ),
},
{'ID' : 8, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-22_s1_r8.edf' ),
},
]
# ----------------------
# Initialise session -
# ----------------------
runWholeSession( runDecisionArray, presentSession )
########################################################################################################################################################################################################
if which_subject == 'sub-23':
# subject information
initials = 'sub-23'
firstName = 'sub-23'
standardFSID = 'sub-23_010100'
birthdate = datetime.date( 1900, 1, 1 )
labelFolderOfPreference = '2014_custom'
presentSubject = Subject( initials, firstName, birthdate, standardFSID, labelFolderOfPreference )
presentProject = Project( 'feedback', subject = presentSubject, base_dir = os.path.join(this_project_folder, 'data') )
sessionID = 'feedback' + presentSubject.initials
sj_session1 = []
if s == 1:
sessionDate = datetime.date(2017, 10, 11)
sj_session1 = 'sub-23_111017'
presentSession = VisualSession(sessionID, sessionDate, presentProject, presentSubject)
try:
os.mkdir(os.path.join(this_project_folder, 'data', initials))
except OSError:
presentSession.logger.debug('output folders already exist')
# ----------------------
# Decision tasks: -
# ----------------------
if s == 1:
runDecisionArray = [
# Measure IRF:
{'ID' : 1, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r1.edf' ),
},
{'ID' : 2, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r2.edf' ),
},
{'ID' : 3, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r3.edf' ),
},
{'ID' : 4, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r4.edf' ),
},
{'ID' : 5, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r5.edf' ),
},
{'ID' : 6, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r6.edf' ),
},
{'ID' : 7, 'scanType': 'main_task', 'condition': 'task', 'session' : 1,
'eyeLinkFilePath': os.path.join(this_raw_folder, initials, sj_session1, 'eye', 'sub-23_s1_r7.edf' ),
},
{'ID' : | |
#!/usr/bin/env python
"""
`htrc.volumes`
Contains functions to retrieve volumes from the HTRC Data API.
The functions in this package will not operate unless they are
executed from an HTRC Data Capsule in Secure Mode. The module
`htrc.mock.volumes` contains Patch objects for testing workflows.
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from htrc.models import HtrcPage
import http.client
from io import BytesIO, TextIOWrapper
import json
import os.path
import progressbar
import socket
import ssl
from urllib.parse import urlencode
from zipfile import ZipFile # used to decompress requested zip archives.
from tqdm import tqdm
from htrc.runningheaders import parse_page_structure
from functools import partial
import pandas as pd
from htrc.util import split_items
import htrc.config
import multiprocessing
import logging
from logging import NullHandler
logging.getLogger(__name__).addHandler(NullHandler())
def get_volumes(data_api_config: htrc.config.HtrcDataApiConfig, volume_ids, concat=False, mets=False, buffer_size=128):
"""
Returns volumes from the Data API as a raw zip stream.
Parameters:
:data_api_config: The configuration data of the DataAPI endpoint.
:volume_ids: A list of volume_ids
:concat: If True, return a single file per volume. If False, return a single
file per page (default).
"""
if not volume_ids:
raise ValueError("volume_ids is empty.")
url = data_api_config.epr + "volumes"
for id in volume_ids:
if ("." not in id
or " " in id):
print("Invalid volume id " + id + ". Please correct this volume id and try again.")
data = {'volumeIDs': '|'.join(
[id.replace('+', ':').replace('=', '/') for id in volume_ids])}
if concat:
data['concat'] = 'true'
if mets:
data['mets'] = 'true'
# Authorization
headers = {"Authorization": "Bearer " + data_api_config.token,
"Content-type": "application/x-www-form-urlencoded"}
# Create SSL lookup
# TODO: Fix SSL cert verification
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Retrieve the volumes
httpsConnection = http.client.HTTPSConnection(
data_api_config.host,
data_api_config.port,
context=ctx,
key_file=data_api_config.key,
cert_file=data_api_config.cert)
httpsConnection.request("POST", url, urlencode(data), headers)
response = httpsConnection.getresponse()
if response.status == 200:
body = True
data = BytesIO()
bytes_downloaded = 0
bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength,
widgets=[progressbar.AnimatedMarker(), ' ',
progressbar.DataSize(),
' (', progressbar.FileTransferSpeed(), ')'])
while body:
body = response.read(buffer_size)
data.write(body)
bytes_downloaded += len(body)
bar.update(bytes_downloaded)
data = data.getvalue()
else:
logging.debug("Unable to get volumes")
logging.debug("Response Code: {}".format(response.status))
logging.debug("Response: {}".format(response.reason))
raise EnvironmentError("Unable to get volumes.")
if httpsConnection is not None:
httpsConnection.close()
return data
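# Minimal usage sketch (the volume id, config values, and output path below are
# illustrative placeholders, not real HTRC identifiers):
#
#   api_config = htrc.config.HtrcDataApiConfig()
#   zip_bytes = get_volumes(api_config, ['mdp.39015012345678'], concat=True)
#   with ZipFile(BytesIO(zip_bytes)) as z:
#       z.extractall('volumes_out')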
def get_pages(data_api_config: htrc.config.HtrcDataApiConfig, page_ids, concat=False, mets=False, buffer_size=128):
"""
Returns a ZIP file containing the specified pages.
Parameters:
:data_api_config: The configuration data of the DataAPI endpoint.
:page_ids: A list of page_ids
:concat: If True, return a single file per volume. If False, return a single
file per page (default).
"""
if not page_ids:
raise ValueError("page_ids is empty.")
url = data_api_config.epr + "pages"
for id in page_ids:
if ("." not in id
or " " in id):
print("Invalid volume id " + id + ". Please correct this volume id and try again.")
data = {'pageIDs': '|'.join(
[id.replace('+', ':').replace('=', '/') for id in page_ids])}
if concat and mets:
print("Cannot set both concat and mets with pages.")
elif concat:
data['concat'] = 'true'
elif mets:
data['mets'] = 'true'
# Authorization
headers = {"Authorization": "Bearer " + data_api_config.token,
"Content-type": "application/x-www-form-urlencoded"}
# Create SSL lookup
# TODO: Fix SSL cert verification
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# Retrieve the volumes
httpsConnection = http.client.HTTPSConnection(
data_api_config.host,
data_api_config.port,
context=ctx,
key_file=data_api_config.key,
cert_file=data_api_config.cert
)
httpsConnection.request("POST", url, urlencode(data), headers)
response = httpsConnection.getresponse()
if response.status == 200:
body = True
data = BytesIO()
bytes_downloaded = 0
bar = progressbar.ProgressBar(max_value=progressbar.UnknownLength,
widgets=[progressbar.AnimatedMarker(), ' ',
progressbar.DataSize(),
' (', progressbar.FileTransferSpeed(), ')'])
while body:
body = response.read(buffer_size)
data.write(body)
bytes_downloaded += len(body)
bar.update(bytes_downloaded)
data = data.getvalue()
else:
logging.debug("Unable to get pages")
logging.debug("Response Code: ".format(response.status))
logging.debug("Response: ".format(response.reason))
raise EnvironmentError("Unable to get pages.")
if httpsConnection is not None:
httpsConnection.close()
return data
def get_oauth2_token(username, password):
# make sure to set the request content-type as application/x-www-form-urlencoded
headers = {"Content-type": "application/x-www-form-urlencoded"}
data = {"grant_type": "client_credentials",
"client_secret": password,
"client_id": username}
data = urlencode(data)
# create an SSL context
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
# make sure the request method is POST
host, port = htrc.config.get_oauth2_host_port()
oauth2port = htrc.config.get_oauth2_port()
oauth2EPRurl = htrc.config.get_oauth2_url()
httpsConnection = http.client.HTTPSConnection(host, oauth2port, context=ctx)
httpsConnection.request("POST", oauth2EPRurl + "?" + data, "", headers)
response = httpsConnection.getresponse()
# if response status is OK
if response.status == 200:
data = response.read().decode('utf8')
jsonData = json.loads(data)
logging.info("*** JSON: {}".format(jsonData))
token = jsonData["access_token"]
logging.info("*** parsed token: {}".format(token))
else:
logging.debug("Unable to get token")
logging.debug("Response Code: {}".format(response.status))
logging.debug("Response: {}".format(response.reason))
logging.debug(response.read())
raise EnvironmentError("Unable to get the token.")
if httpsConnection is not None:
httpsConnection.close()
return token
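# Usage sketch (the credentials shown are placeholders; a valid HTRC client id
# and secret are required in practice):
#
#   token = get_oauth2_token('my_client_id', 'my_client_secret')
#   headers = {"Authorization": "Bearer " + token}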
def grep_error(file_name, output_dir, pattern, txt_index):
na_volume = []
if output_dir.endswith("/"):
file_path = output_dir + file_name
else:
file_path = output_dir + "/" + file_name
if os.path.isfile(file_path):
for line in open(file_path):
if pattern in line:
na_volume.append(line.split()[txt_index])
return na_volume
def _to_htrc_page(page_file, zip):
with TextIOWrapper(BytesIO(zip.read(page_file)), encoding='utf-8') as page:
return HtrcPage([line.rstrip() for line in page.readlines()])
def download_volumes(volume_ids, output_dir, concat=False, mets=False, pages=False,
remove_headers_footers=False, hf_window_size=6, hf_min_similarity=0.7, skip_removed_hf=False,
parallelism=multiprocessing.cpu_count(), batch_size=250, data_api_config=None):
if not 0 < parallelism <= multiprocessing.cpu_count():
raise ValueError("Invalid parallelism level specified")
remove_hf_fun = partial(
_remove_headers_footers_and_save,
concat=concat,
hf_min_similarity=hf_min_similarity,
hf_window_size=hf_window_size,
skip_removed_hf=skip_removed_hf,
output_dir=output_dir
)
volume_ids = list(set(volume_ids)) # ensure unique volume ids
num_vols = len(volume_ids)
data_api_config = data_api_config or htrc.config.HtrcDataApiConfig()
os.makedirs(output_dir, exist_ok=True)
if all((data_api_config.token, data_api_config.host, data_api_config.port)):
logging.info("obtained token: %s\n" % data_api_config.token)
try:
errors = []
rights = []
with tqdm(total=num_vols) as progress, multiprocessing.Pool(processes=parallelism) as pool:
for ids in split_items(volume_ids, batch_size):
if pages:
if concat and mets:
raise ValueError("Cannot set both concat and mets with pages.")
else:
data = get_pages(data_api_config, ids, concat and not remove_headers_footers, mets)
else:
data = get_volumes(data_api_config, ids, concat and not remove_headers_footers, mets)
volumes = []
with ZipFile(BytesIO(data)) as vols_zip:
zip_list = vols_zip.namelist()
if 'ERROR.err' in zip_list:
errors.append(vols_zip.read('ERROR.err').decode('utf-8'))
zip_list.remove('ERROR.err')
if 'volume-rights.txt' in zip_list:
rights_data = vols_zip.read('volume-rights.txt').decode('utf-8')
zip_list.remove('volume-rights.txt')
if not rights:
rights.append(rights_data)
else:
# due to the format in which 'volume-rights.txt' is created, we have to skip
# the first 4 lines which make up the header of the file, to extract only the
# actual volume rights data for accumulation
rights.append(''.join(rights_data.splitlines(keepends=True)[4:]))
zip_volume_paths = [zip_vol_path for zip_vol_path in zip_list if zip_vol_path.endswith('/')]
num_vols_in_zip = len(zip_volume_paths)
if not remove_headers_footers:
vols_zip.extractall(output_dir, members=zip_list)
progress.update(num_vols_in_zip)
else:
for zip_vol_path in zip_volume_paths:
sorted_vol_zip_page_paths = sorted(zip_page_path for zip_page_path in zip_list if zip_page_path.startswith(zip_vol_path) and not zip_page_path.endswith('/'))
vol_pages = [_to_htrc_page(page_path, vols_zip) for page_path in sorted_vol_zip_page_paths]
volumes.append((zip_vol_path, sorted_vol_zip_page_paths, vol_pages))
del data, vols_zip
num_missing = batch_size - num_vols_in_zip if num_vols >= batch_size else num_vols - num_vols_in_zip
progress.update(num_missing) # update progress bar state to include the missing volumes also
# `volumes` will be empty if `remove_headers_footers=False` since the ZIP was extracted
# without further processing
if volumes:
for _ in pool.imap_unordered(remove_hf_fun, volumes):
progress.update()
na_volumes_all = []
if errors:
with open(os.path.join(output_dir, 'ERROR.err'), 'w') as err_file:
err_file.write(''.join(errors))
na_volumes_error = grep_error('ERROR.err', output_dir, 'KeyNotFoundException', -1)
na_volumes_all.extend(na_volumes_error)
if rights:
with open(os.path.join(output_dir, 'volume-rights.txt'), 'w') as rights_file:
rights_file.write(''.join(rights))
if htrc.config.get_dataapi_access() == "true":
na_volumes_rights = grep_error('volume-rights.txt', output_dir, ' 3', 0)
na_volumes_all.extend(na_volumes_rights)
num_na = len(na_volumes_all)
if num_na > 0:
with open(os.path.join(output_dir, 'volumes_not_available.txt'), 'w') as volumes_na:
volumes_na.write("\n".join(str(item) for item in na_volumes_all))
if num_na < 100:
print("\nThe following volume ids are not available. \n Please check volumes_not_available.txt "
"for the complete list. ")
print('\n'.join(str(item) for item in na_volumes_all))
else:
print("\nThere are {:,} unavailable volumes.\n Please check volumes_not_available.txt "
"for the "
"complete list. \nTo check the validity of volumes in your workset or volume id file go "
"to:\n "
"https://analytics.hathitrust.org/validateworkset \n or email us at "
"<EMAIL> "
"for assistance.".format(num_na))
except socket.error:
raise RuntimeError("HTRC Data API time out. Check your inode usage if downloading a large workset. "
"Contact HTRC for further help.")
else:
raise RuntimeError("Failed to obtain the JWT token.")
def _remove_headers_footers_and_save(vol_data, concat, hf_min_similarity, hf_window_size, skip_removed_hf, output_dir):
zip_vol_path, sorted_vol_zip_page_paths, vol_pages = vol_data
clean_volid = zip_vol_path[:-1]
vol_pages = parse_page_structure(vol_pages, window_size=hf_window_size, min_similarity_ratio=hf_min_similarity)
pages_body = (page.body for page in vol_pages)
# save the removed headers/footers for user inspection
if skip_removed_hf:
if concat:
with open(os.path.join(output_dir, clean_volid + '.txt'), 'w', encoding='utf-8') as vol_file:
vol_file.write('\n'.join(pages_body))
else:
vol_path = os.path.join(output_dir, zip_vol_path)
os.mkdir(vol_path)
for vol_page_path, page_body in zip(sorted_vol_zip_page_paths, pages_body):
with open(os.path.join(output_dir, vol_page_path), 'w', encoding='utf-8') as page_file:
page_file.write(page_body)
else:
if concat:
with open(os.path.join(output_dir, clean_volid + '.txt'), 'w', encoding='utf-8') as vol_file:
vol_file.write('\n'.join(pages_body))
else:
vol_path = os.path.join(output_dir, zip_vol_path)
os.mkdir(vol_path)
for vol_page_path, page_body in zip(sorted_vol_zip_page_paths, pages_body):
with open(os.path.join(output_dir, vol_page_path), 'w', encoding='utf-8') as page_file:
page_file.write(page_body)
removed_hf = []
for vol_page_path, vol_page | |
import os
import sys
import datetime
import numpy as np
import scipy.signal
import pandas as pd
import yfinance as yf
from contextlib import contextmanager
from src.utils_date import add_days
from src.utils_date import prev_weekday
#from pandas_datareader.nasdaq_trader import get_nasdaq_symbols
ERROR_NO_MINUTE_DATA_YTD = 'Skip: Missing minute-level data for yesterday'
ERROR_NO_MINUTE_DATA_TDY = 'Skip: Missing minute-level data for today'
ERROR_CANDLES_PER_DAY = 'Skip: Insufficient candles today ({} less than {})'
ERROR_NULL_COL = 'Skip: NULL value in df_i columns ({})'
ERROR_NULL_DAY_LEVEL_IND = 'Skip: NULL value in day-level indicators'
ERROR_PRICES_D_NOT_UPDATE = 'Error: prices_d not updated, latest date found: {}'
@contextmanager
def suppress_stdout():
'''Context manager to suppress function output to sys.stdout'''
with open(os.devnull, "w") as devnull:
old_stdout = sys.stdout
sys.stdout = devnull
try:
yield
finally:
sys.stdout = old_stdout
def get_ls_sym():
'''Returns list of tickers from nasdaqtrader.com
Duplicates and strings with length > 5 are removed
Returns:
ls_sym (List of str)
'''
#df_symbols = get_nasdaq_symbols()
#ls_sym = df_symbols.index.to_list()
ls_urls = [
'http://ftp.nasdaqtrader.com/dynamic/SymDir/nasdaqlisted.txt'
,'http://ftp.nasdaqtrader.com/dynamic/SymDir/otherlisted.txt'
]
ls_sym = []
for i, url in enumerate(ls_urls):
df = pd.read_csv(url, sep='|')
for col in list(df):
if col in ['ACT Symbol', 'Symbol']: df['sym'] = df[col]
ls_sym+=df[df['sym'].str.len()<=5]['sym'].to_list()
ls_sym = list(set(ls_sym)) # remove duplicates
return ls_sym
def get_df_prices(sym, start_str, end_str):
'''Return dataframe with minute-level stock price data
from start date to end date (inclusive).
Args:
sym (str): Ticker symbol e.g. 'BYND'
start_str (str): Start date string e.g. '2020-07-18'
end_str (str): End date string e.g. '2020-07-18'
Returns:
df (pandas.Dataframe)
'''
assert start_str <= end_str
end_str_mod=add_days(end_str, 3)
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str_mod,
interval='1m',
progress=0,
prepost=True).reset_index()
is_date_range = ((df['Datetime'].dt.date.astype('str')>=start_str)
&(df['Datetime'].dt.date.astype('str')<=end_str))
df = df[is_date_range]
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
is_reg_hours = ((df['Datetime'].dt.time.astype('str')>='09:30:00')
&(df['Datetime'].dt.time.astype('str')<='15:59:00'))
df['is_reg_hours'] = np.where(is_reg_hours, 1, 0)
df['sym'] = sym
df = df.rename(columns={
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Adj Close':'adj_close',
'Volume':'volume'
})
ls_col = [
'sym',
'datetime',
'open',
'high',
'low',
'adj_close',
'volume',
'is_reg_hours',
]
return df[ls_col]
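# Usage sketch (illustrative ticker and dates; note that Yahoo Finance only
# serves minute-level data for recent history):
#
#   df_prices = get_df_prices('BYND', '2020-07-17', '2020-07-17')
#   df_prices[df_prices['is_reg_hours'] == 1].head()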
def add_rsi(df, rsi_period):
'''Returns dataframe with additional columns:
rsi (float)
Args:
df (pandas.DataFrame): Must be index sorted by datetime:
adj_close (float)
rsi_period (int): Number of rsi periods
Returns:
df (pandas.DataFrame)
'''
chg = df['adj_close'].diff(1)
gain = chg.mask(chg<0,0)
loss = chg.mask(chg>0,0)
avg_gain = gain.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
avg_loss = loss.ewm(com=rsi_period-1, min_periods=rsi_period).mean()
rs = abs(avg_gain/avg_loss)
rsi = 100 - (100/(1+rs))
df['rsi14'] = rsi
return df
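# Minimal sketch of add_rsi on a toy series (values are made up; note the
# result is always written to the 'rsi14' column regardless of rsi_period):
#
#   toy = pd.DataFrame({'adj_close': [10.0, 10.2, 10.1, 10.4, 10.3, 10.6]})
#   toy = add_rsi(toy, rsi_period=3)
#   toy['rsi14']  # leading rows are NaN until enough periods accumulate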
def add_vwap(df):
'''Returns dataframe with additional columns:
vwap (float): Volume Weighted Average Price
vwap_var (float): % variance of close from vwap
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
open
high
low
adj_close
volume
Returns:
df (pandas.DataFrame)
'''
df['vwap'] = (df['volume']*(df['high']+df['low']+df['adj_close'])/3).cumsum()/df['volume'].cumsum()
df['vwap'] = df['vwap'].fillna(df['adj_close'])
df['vwap_var'] = (df['adj_close']/df['vwap'])-1
return df
def get_df_i(sym, date_str, live_data, db, num_candles_min = 200):
'''Returns interim dataframe with price data and
trading indicators for input symbol and date
Args:
sym (str)
date_str (str)
live_data (int)
db (Database object)
num_candles_min (int)
Returns:
df_i (pandas.Dataframe)
'''
start_str = prev_weekday(date_str) #start 1 day early to get prev day data for rsi etc
end_str = add_days(date_str, 3) #extend end date string due to bug
if live_data:
with suppress_stdout():
df = yf.download(sym,
start=start_str,
end=end_str,
interval='1m',
prepost = False,
progress=0).reset_index()
df['Datetime'] = df['Datetime'].dt.tz_localize(None) #remove timezone
df = df.rename(columns={'Adj Close':'adj_close',
'Datetime':'datetime',
'Open':'open',
'High':'high',
'Low':'low',
'Volume':'volume'})
else:
q = '''
SELECT *
FROM prices_m
WHERE is_reg_hours = 1
AND sym='{}'
AND DATE(datetime)>='{}'
AND DATE(datetime)<='{}'
ORDER BY datetime
'''.format(sym, start_str, date_str)
df = pd.read_sql(q, db.conn)
df['datetime'] = pd.to_datetime(df['datetime'])
df['date_str'] = df['datetime'].dt.date.astype('str')
if df[df['date_str']==start_str].empty:
raise Exception(ERROR_NO_MINUTE_DATA_YTD)
if df[df['date_str']==date_str].empty:
raise Exception(ERROR_NO_MINUTE_DATA_TDY)
num_candles_today = df[df['date_str']==date_str].shape[0]
if num_candles_today<num_candles_min and not live_data:
raise Exception(ERROR_CANDLES_PER_DAY.format(num_candles_today, num_candles_min))
df = df[df['date_str']<=date_str]
df = df[df['date_str']>=start_str]
df['sma9'] = df['adj_close'].rolling(9).mean()
df['sma90'] = df['adj_close'].rolling(90).mean()
df['sma180'] = df['adj_close'].rolling(180).mean()
df['sma180'] = df['sma180'].fillna(df['sma90'])
df['sma9_var'] = (df['adj_close']/df['sma9'])-1
df['sma180_var'] = (df['adj_close']/df['sma180'])-1
df = add_rsi(df, 14)
df['spread']=((df['adj_close']/df['open'])-1).abs()
df['spread14_e']=df['spread'].ewm(span=14).mean()
df['volume14'] = df['volume'].rolling(14).mean()
df['volume34'] = df['volume'].rolling(34).mean()
df['volume14_34_var'] = (df['volume14']/df['volume34'])-1
df['volume14_34_var'] = df['volume14_34_var'].fillna(0.0)
prev_close = df[df['date_str']==start_str]['adj_close'].to_list()[-1]
prev_floor = df[df['date_str']==start_str]['adj_close'].min()
prev_ceil = df[df['date_str']==start_str]['adj_close'].max()
df['prev_close'] = prev_close
df['prev_close_var'] = df['adj_close']/prev_close - 1
df['prev_floor_var'] = (df['adj_close']/prev_floor)-1
df['prev_ceil_var'] = (df['adj_close']/prev_ceil)-1
df['candle_score'] = df['adj_close']/df['open']-1
df['prev1_candle_score'] = df['candle_score'].shift(1)
df['prev2_candle_score'] = df['candle_score'].shift(2)
df['prev3_candle_score'] = df['candle_score'].shift(3)
df = df[df['date_str']==date_str]
df = add_vwap(df)
df = df.rename(columns={'adj_close':'close'})
ls_col = [
'datetime',
'close',
'sma9',
'sma180',
'rsi14',
'vwap',
'sma9_var',
'sma180_var',
'vwap_var',
'spread14_e',
'volume14_34_var',
'prev_close',
'prev_close_var',
'prev_floor_var',
'prev_ceil_var',
'prev1_candle_score',
'prev2_candle_score',
'prev3_candle_score',
]
df = df[ls_col]
ls_col_na = df.columns[df.isna().any()].tolist()
if ls_col_na:
raise Exception(ERROR_NULL_COL.format(ls_col_na))
return df.reset_index(drop=1)
def add_peaks_valleys(df, order=5):
'''Returns Dataframe with additional columns:
peak_valley - 1 if peak, -1 if valley, 0 o.w.
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
close
order (int): Number of points on each side to use for the comparison when detecting local extrema
Returns:
df (pandas.DataFrame)
'''
df['peak_valley'] = 0
col_peak_valley = list(df).index('peak_valley')
peak_indexes = scipy.signal.argrelextrema(np.array(df['close']), np.greater, order = order)[0]
valley_indexes = scipy.signal.argrelextrema(np.array(df['close']), np.less, order = order)[0]
df.iloc[peak_indexes, col_peak_valley] = 1
df.iloc[valley_indexes, col_peak_valley] = -1
return df
def add_valley_variances(df):
'''Returns Dataframe with additional columns:
valley_close_pct_chg (float): % change in close of current and previous valley e.g. 1% -> 0.01
valley_rsi_diff (float): Change in rsi of current and previous valley
valley_interval_mins (float): Minutes since last valley
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
close
rsi14
peak_valley
Returns:
df (pandas.DataFrame)
'''
df['valley_close'] = np.where(df['peak_valley']==-1, df['close'], np.nan)
df['valley_rsi'] = np.where(df['peak_valley']==-1, df['rsi14'], np.nan)
df['valley_datetime'] = pd.to_datetime(np.where(df['peak_valley']==-1, df['datetime'], pd.NaT))
df['valley_close'] = df['valley_close'].ffill()
df['valley_rsi'] = df['valley_rsi'].ffill()
df['valley_datetime'] = df['valley_datetime'].ffill()
df['valley_close_pct_chg'] = df['valley_close'].pct_change()
df['valley_rsi_diff'] = df['valley_rsi'].diff()
df['valley_interval_mins'] = df['valley_datetime'].diff().astype('timedelta64[m]')
df = df.drop(columns=['valley_close'
,'valley_rsi'
,'valley_datetime'])
return df
def add_divergences(df, close_buffer=0, rsi_buffer=0):
'''Returns Dataframe with additional columns:
divergence (str):
'bull_reg' - Regular bullish divergence i.e. Lower price valleys, but rise in RSI
'bull_hid' - Hidden bullish divergence i.e. Higher price valleys, but drop in RSI
'' - No divergence
Args:
df (pandas.DataFrame): Dataframe with at least columns:
datetime
valley_close_pct_chg
valley_rsi_diff
close_buffer (float): Price change must be at least this % change to count as divergence, e.g 1.5 -> 1.5%
rsi_buffer (float): RSI change must be at least this change to count as divergence
Returns:
df (pandas.DataFrame)
'''
df['divergence'] = ''
df['divergence'] = np.where((df['valley_close_pct_chg'] < -(close_buffer/100))
&(df['valley_rsi_diff'] > rsi_buffer)
,'bull_reg'
,df['divergence'])
df['divergence'] = np.where((df['valley_close_pct_chg'] > (close_buffer/100))
&(df['valley_rsi_diff'] < -rsi_buffer)
,'bull_hid'
,df['divergence'])
return df
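# Typical chaining of the divergence helpers (illustrative sketch; df_i is an
# interim frame as produced by get_df_i, and the buffer values are arbitrary):
#
#   df_i = add_peaks_valleys(df_i, order=5)
#   df_i = add_valley_variances(df_i)
#   df_i = add_divergences(df_i, close_buffer=0.5, rsi_buffer=1.0)
#   df_i[df_i['divergence'] != '']  # rows flagged as bullish divergences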
def add_additional_measures(df, sym):
'''Add last few features to Dataframe
Args:
df (pandas.Dataframe)
Returns:
df (pandas.Dataframe)
'''
df['mins_from_start'] = (df['datetime']-df['datetime'].min()).astype('timedelta64[m]')
df['valley_close_score'] = df['valley_close_pct_chg'].abs()*100
df['valley_rsi_score'] = df['valley_rsi_diff'].abs()
df['day_open_var'] = df['close']/df['close'].to_list()[0] - 1
df['open_from_prev_close_var'] = df['close'].to_list()[0]/df['prev_close'] - 1
df['ceil'] = df['close'].cummax()
df['ceil_var'] = df['close']/df['ceil'] - 1
df['floor'] = df['close'].cummin()
df['floor_var'] = df['close']/df['floor'] - 1
df['sym'] = sym
#df['hour_of_day'] = (df['datetime'] - pd.Timedelta(minutes=29)).dt.hour
#df['weekday'] = df['datetime'].dt.weekday.astype('category') #monday is 0
return df
def add_is_profit(df, target_profit, target_loss):
'''Returns Dataframe with additional columns, calculated based on input profit/loss parameters:
actual_buy_price (float)
profit (float)
is_profit (bool)
Args:
df (pandas.DataFrame): Sorted Dataframe with at least these columns:
close (float)
divergence (str)
target_profit (float): Target percentage profit e.g. 0.01 -> 1%
target_loss (float): Target percentage loss e.g. 0.01 -> 1%
Returns:
df (pandas.DataFrame)
'''
buy_delay = 2 #only buy after n mins
df['actual_buy_price'] = df['close'].shift(-buy_delay)
df['profit'] = None
for idx_div_row in df.index[df['divergence']!='']:
actual_buy_price = df.iloc[idx_div_row, df.columns.get_loc('actual_buy_price')]
profit = 0
for selling_price in df.iloc[idx_div_row:-buy_delay, df.columns.get_loc('actual_buy_price')]:
profit = (selling_price/actual_buy_price)-1
if profit>target_profit or profit<target_loss:
break
df.at[idx_div_row, 'profit'] = profit
df['is_profit'] = df['profit']>=target_profit
df['profit'] = df['profit'].astype('float')
return df
def get_dt_day_indicators(sym, close_latest, date_str_tdy, db):
'''Returns dictionary with day level indicators for input symbol
Args:
sym (str)
close_latest (float)
Returns:
dt_day_indicators (Dictionary)
'''
q='''
with t as (
select date, adj_close
from prices_d
where sym = '{}'
and date(date) < '{}'
order by date desc
limit 185
)
select adj_close
from t
order by date
'''.format(sym.upper(), date_str_tdy)
df = pd.read_sql(q, db.conn)
df = pd.concat([df, pd.DataFrame({'adj_close': [close_latest]})], ignore_index=True)
df['sma9'] = df['adj_close'].rolling(9).mean()
df['sma90'] = df['adj_close'].rolling(90).mean()
df['sma180'] = df['adj_close'].rolling(180).mean()
df['sma180'] = df['sma180'].fillna(df['sma90'])
df['sma9_var'] = (df['adj_close']/df['sma9'])-1
df['sma180_var'] = (df['adj_close']/df['sma180'])-1
df = add_rsi(df, 14)
ls_col = [
'sma9_var',
'sma180_var',
'rsi14',
]
if df[ls_col].iloc[-1].isnull().any():
raise Exception(ERROR_NULL_DAY_LEVEL_IND)
dt_day_indicators = dict(df[ls_col].iloc[-1])
return dt_day_indicators
def add_day_level_indicators(df_i, sym, db):
'''Returns df_interim with day-level indicators added
Args:
df_i (pandas.DataFrame)
sym (str)
Returns:
df_i (pandas.DataFrame)
'''
close_latest = df_i['close'].to_list()[0]
date_str_tdy = df_i['datetime'].to_list()[-1].strftime('%Y-%m-%d')
dt_day_indicators = get_dt_day_indicators(sym, close_latest, date_str_tdy, db)
for col, value in dt_day_indicators.items():
df_i[f'day_{col}'] = value
return df_i
def get_df_c(sym, date_str, live_data, db, target_profit, target_loss, index_limit=1000):
'''Returns df_cooked
Args:
sym (str)
date_str (str)
live_data (int)
db (DataBase object)
target_profit (float): Target percentage profit e.g. 0.01 -> 1%
target_loss (float): Target percentage | |
With
class _SpecialFunctionParameterTokenHitTests(_SimpleWordsTokenHitTests):
def __init__(self, func_name, gjs):
self.func_name = func_name
super(_SpecialFunctionParameterTokenHitTests, self).__init__(gjs)
def __within_target_function(self, token):
if token.parent and tu.is_parenthesis(token.parent):
tgtfunc = token.parent.parent
if tgtfunc and tu.is_function(tgtfunc):
fst = tu.token_next_enable(tgtfunc)
return tu.equals_ignore_case(fst.value, self.func_name)
return False
def first_test(self, token):
return super(_SpecialFunctionParameterTokenHitTests, self).first_test(token) \
and self.__within_target_function(token)
def get_next_test(self, tokens):
fnc = super(_SpecialFunctionParameterTokenHitTests, self).get_next_test(tokens)
return lambda t: fnc(t) and self.__within_target_function(t)
def is_completed(self, tokens):
return super(_SpecialFunctionParameterTokenHitTests, self).is_completed(tokens)
def is_completed_group(self, tokens, curr_stmt):
if not super(_SpecialFunctionParameterTokenHitTests, self).is_completed_group(tokens, curr_stmt):
return False
last = tokens[-1]
for tkn in tu.flatten_tokens_next(curr_stmt, last):
if tu.is_enable(tkn):
return tu.is_close_punctuation(tkn)
return False
class GroupingSpecialFunctionParameter(_BaseWordsGrouping):
"""
Extends grouping for special functions, such as
ANSI TRIM,
Oracle TRIM / EXTRACT,
PostgreSQL SUBSTRING,
and similar constructs.
"""
GROUP_JUDGE_SET = [
# TRIM ( [ LEADING | TRAILING | BOTH ] trim_char FROM string )  -- standard SQL
_SpecialFunctionParameterTokenHitTests("TRIM", (
("LEADING", "TRAILING", "BOTH"),
tu.is_string_candidate,
"FROM",
tu.is_string_candidate,
)),
# TRIM ( trim_char FROM string )
_SpecialFunctionParameterTokenHitTests("TRIM", (
tu.is_string_candidate,
"FROM",
tu.is_string_candidate,
)),
# EXTRACT ( element FROM datetime )
_SpecialFunctionParameterTokenHitTests("EXTRACT", (
tu.is_value_candidate,
"FROM",
tu.is_identifier,
)),
# overlay(string placing string from int [for int])
_SpecialFunctionParameterTokenHitTests("overlay", (
tu.is_string_candidate,
"placing",
tu.is_string_candidate,
"from",
tu.is_number_candidate,
"for",
tu.is_number_candidate,
)),
_SpecialFunctionParameterTokenHitTests("overlay", (
tu.is_string_candidate,
"placing",
tu.is_string_candidate,
"from",
tu.is_number_candidate,
)),
# position(substring in string)
_SpecialFunctionParameterTokenHitTests("position", (
tu.is_string_candidate,
"in",
tu.is_string_candidate,
)),
# substring(string [from int] [for int])
_SpecialFunctionParameterTokenHitTests("substring", (
tu.is_string_candidate,
"from",
tu.is_number_candidate,
"for",
tu.is_number_candidate,
)),
_SpecialFunctionParameterTokenHitTests("substring", (
tu.is_string_candidate,
"from",
tu.is_number_candidate,
)),
# substring(string from pattern)
_SpecialFunctionParameterTokenHitTests("substring", (
tu.is_string_candidate,
"from",
tu.is_string_candidate,
)),
# substring(string from pattern for escape)
_SpecialFunctionParameterTokenHitTests("substring", (
tu.is_string_candidate,
"from",
tu.is_string_candidate,
"for",
tu.is_string_candidate,
)),
]
def get_group_class(self):
return SpecialFunctionParameter
def init_group_token(self, token, idx):
pass
class _GroupingCalculationTokenHitTests(_WordsTokenHitTests):
def first_test(self, token):
return tu.is_calc_operator(token)
def get_next_test(self, tokens):
last = tokens[-1]
if tu.is_value_candidate(last):
return tu.is_calc_operator
if tu.is_calc_operator(last):
return tu.is_value_candidate
return lambda t:False
def is_completed(self, tokens):
return False
# Starting the match from is_value_candidate is slow, so the leading value token is prepended afterwards instead
def get_add_prev_tokens(self, tokens, prevs):
for i, tkn in list(enumerate(prevs))[::-1]:
if tu.is_enable(tkn):
if tu.is_value_candidate(tkn):
return prevs[i:]
break
return []
def is_completed_group(self, tokens, curr_stmt):
first = tokens[0]
if not tu.is_value_candidate(first):
return False
last = tokens[-1]
if not tu.is_value_candidate(last):
return False
tokens = [t for t in tokens if tu.is_enable(t)]
return len(tokens) > 2
def adj_tokens(self, tokens, **_):
for i, tkn in list(enumerate(tokens))[::-1]:
if tu.is_enable(tkn):
return tokens[:i + 1]
return tokens
class GroupingCalculation(_BaseWordsGrouping):
"""
Extends grouping for calculated (arithmetic) values.
"""
GROUP_JUDGE_SET = [
_GroupingCalculationTokenHitTests(),
]
def get_group_class(self):
return Calculation
def init_group_token(self, token, idx):
pass
class AdjustGroupingFunction(_BaseWordsGrouping):
"""
When whitespace follows the function name the token is not recognized as a
Function, so adjust the Function grouping here.
Currently only the functions below are handled (matching broadly, e.g. on
T.Name, would cause wrong matches inside INSERT clauses):
- COUNT
- EXISTS
- SUM
- MAX
- MIN
"""
GROUP_JUDGE_SET = [
(lambda t: tu.equals_ignore_case(t.value, "COUNT") \
and (t.ttype in T.Name or t.ttype in T.Keyword) \
and (not tu.is_identifier(t.parent)) ,
tu.is_parenthesis),
(lambda t: tu.equals_ignore_case(t.value, "EXISTS") \
and (t.ttype in T.Name or t.ttype in T.Keyword) \
and (not tu.is_identifier(t.parent)),
tu.is_parenthesis),
(lambda t: tu.equals_ignore_case(t.value, "SUM") \
and (t.ttype in T.Name or t.ttype in T.Keyword) \
and (not tu.is_identifier(t.parent)),
tu.is_parenthesis),
(lambda t: tu.equals_ignore_case(t.value, "MAX") \
and (t.ttype in T.Name or t.ttype in T.Keyword) \
and (not tu.is_identifier(t.parent)),
tu.is_parenthesis),
(lambda t: tu.equals_ignore_case(t.value, "MIN") \
and (t.ttype in T.Name or t.ttype in T.Keyword) \
and (not tu.is_identifier(t.parent)),
tu.is_parenthesis),
]
def get_group_class(self):
return sql.Function
def group_having(tlist):
def end_match(token):
stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
'WHEN', # for Oracle10g merge
'CONNECT', # for Oracle connect by
)
if token.match(T.Keyword, stopwords):
return True
if token.match(T.DML, ('DELETE')): # for Oracle10g merge
return True
if token.match(T.DML, ('START')): # for Oracle connect by
return True
return False
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, Having)]
idx = 0
token = tlist.token_next_match(idx, T.Keyword, 'HAVING')
while token:
tidx = tlist.token_index(token)
end = tlist.token_matching(tidx + 1, (end_match, ))
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
tgroup = tlist.group_tokens(Having,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(tgroup)
token = tlist.token_next_match(idx, T.Keyword, 'HAVING')
proc = SqlFormatterException.to_wrap_try_except(proc, 0)
proc(tlist)
def group_when(tlist):
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, When)]
if not tu.is_case(tlist):
return
idx = 0
token = tlist.token_next_match(idx, T.Keyword, 'WHEN')
stopwords = ('THEN', 'END')
while token:
tidx = tlist.token_index(token)
end = tlist.token_next_match(tidx + 1, T.Keyword, stopwords)
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
tgroup = tlist.group_tokens(When,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(tgroup)
token = tlist.token_next_match(idx, T.Keyword, 'WHEN')
proc = SqlFormatterException.to_wrap_try_except(proc, 0)
proc(tlist)
def group_on(tlist):
def end_match(token):
stopwords = ('WHERE', 'ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
'WHEN', # for Oracle10g merge
'CONNECT', # for Oracle connect by
)
if token.match(T.Keyword, stopwords):
return True
if tu.is_phrase(token):
if token.match_phrase(('ORDER', 'BY')) or token.match_phrase(('GROUP', 'BY')):
return True
return tu.is_join(token) or tu.is_mergewhen(token)
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, On)]
idx = 0
token = tlist.token_next_match(idx, T.Keyword, 'ON')
while token:
tidx = tlist.token_index(token)
end = tlist.token_matching(tidx + 1, (end_match,))
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
tgroup = tlist.group_tokens(On,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(tgroup)
token = tlist.token_next_match(idx, T.Keyword, 'ON')
proc = SqlFormatterException.to_wrap_try_except(proc, 0)
proc(tlist)
def group_connectby_startwith(tlist):
def start_match(token):
if tu.is_phrase(token):
return token.match_phrase(("CONNECT", "BY")) or token.match_phrase(("START", "WITH"))
def end_match(token):
stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION', 'EXCEPT', 'HAVING',
'WHEN', # for Oracle10g merge
'CONNECT', # for Oracle connect by
)
if token.match(T.Keyword, stopwords):
return True
if token.match(T.DML, ('DELETE')): # for Oracle10g merge
return True
if token.match(T.DML, ('START')): # for Oracle connect by
return True
if tu.is_phrase(token):
if token.match_phrase(("CONNECT", "BY")) or token.match_phrase(("START", "WITH")):
return True
return False
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()
if (not isinstance(sgroup, ConnectBy)) and (not isinstance(sgroup, StartWith))]
idx = 0
token = tlist.token_matching(idx, (start_match, ))
while token:
tidx = tlist.token_index(token)
end = tlist.token_matching(tidx + 1, (end_match, ))
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
group_class = None
if token.match_phrase(("CONNECT", "BY")):
group_class = ConnectBy
elif token.match_phrase(("START", "WITH")):
group_class = StartWith
tgroup = tlist.group_tokens(group_class,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(tgroup)
token = tlist.token_matching(idx, (start_match, ))
proc = SqlFormatterException.to_wrap_try_except(proc, 0)
proc(tlist)
def group_mergeupdateinsertclause(tlist):
def get_start_match(tlist):
token = tlist.token_matching(0, (tu.is_mergewhen,))
if token:
return tu.token_next_enable(tlist, token)
return None
def get_start_match2(tlist, idx):
token = tlist.token_matching(idx, (tu.is_mergewhen, tu.is_delete_dml))
if token:
if tu.is_delete_dml(token):
return token
return tu.token_next_enable(tlist, token)
return None
def end_match(token):
return tu.is_mergewhen(token) or tu.is_delete_dml(token)
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()
if not isinstance(sgroup, MergeUpdateInsertClause)]
token = get_start_match(tlist)
while token:
tidx = tlist.token_index(token)
end = tlist.token_matching(tidx + 1, (end_match,))
if end is None:
end = tlist._groupable_tokens[-1]
else:
end = tlist.tokens[tlist.token_index(end) - 1]
tgroup = tlist.group_tokens(MergeUpdateInsertClause,
tlist.tokens_between(token, end),
ignore_ws=True)
idx = tlist.token_index(tgroup)
token = get_start_match2(tlist, idx)
proc = SqlFormatterException.to_wrap_try_except(proc, 0)
proc(tlist)
def adj_group_comparison(tlist):
"""
Adjusts Comparison grouping, which does not line up cleanly when comments are present.
Also groups expressions that were not recognized as a Comparison.
"""
def find_comparison(tokens):
for token in tokens:
if tu.is_comparison(token):
return token
return None
def adjust_identifier_tokens(comp):
"""
Split comments out of identifier tokens.
"""
for token in comp.tokens[:]:
if tu.is_identifier(token):
for tkn in token.tokens[::-1]:
if tkn.is_whitespace() or tu.is_comment(tkn):
comp.insert_after(token, tkn)
tkn.parent = token
token.tokens.remove(tkn)
else:
break
def adjust_tokens(tlist, comp_tokens):
if comp_tokens[0].is_whitespace():
comp_tokens = comp_tokens[1:]
if comp_tokens[-1].is_whitespace():
comp_tokens = comp_tokens[:-1]
comp = find_comparison(comp_tokens)
if comp:
left = True
left_idx = 0
for token in comp_tokens:
if token is comp:
left = False
elif left:
comp.insert_before(comp.tokens[left_idx], token)
left_idx += 1
tlist.tokens.remove(token)
else:
comp.tokens.append(token)
tlist.tokens.remove(token)
adjust_identifier_tokens(comp)
else:
if tu.find_comparison_operator_words(comp_tokens):
tgp = tlist.group_tokens(sql.Comparison, comp_tokens)
adjust_identifier_tokens(tgp)
def adjust_prior(tlist, comp_tokens):
"""
Adjust handling of the PRIOR keyword.
"""
prior = None
for token in comp_tokens[:]:
if not tu.is_enable(token):
continue
if prior:
tokens = tlist.tokens_between(prior, token)
if tu.is_comparison(token):
tgp = token.group_tokens(
sql.Identifier,
token.tokens_between(token.tokens[0],tu.token_next_enable(token))
)
for tkn in tokens[1::-1]:
tgp.tokens.insert(0, tkn)
tkn.parent.tokens.remove(tkn)
tkn.parent = tgp
comp_tokens.remove(tkn)
else:
idx = comp_tokens.index(prior)
comp_tokens.insert(idx, tlist.group_tokens(sql.Identifier, tokens))
for tkn in tokens:
comp_tokens.remove(tkn)
prior.ttype = T.Name # use the Name ttype, since Keyword would force a line break
prior = None
continue
if token.ttype in T.Keyword and tu.equals_ignore_case(token.value, "PRIOR"):
prior = token
else:
prior = None
def proc(tlist):
[proc(sgroup) for sgroup in tlist.get_sublists()]
in_prior = False
target_tokens = []
if tu.is_where(tlist):
where_token = tlist.token_next_match(0, T.Keyword, 'WHERE')
where_index = tlist.token_index(where_token)
target_tokens = tlist.tokens[where_index + 1:] # process tokens after WHERE
elif tu.is_when(tlist):
when_token = tlist.token_next_match(0, T.Keyword, 'WHEN')
when_index = tlist.token_index(when_token)
target_tokens = tlist.tokens[when_index + 1:] # process tokens after WHEN
elif tu.is_having(tlist):
having_token = tlist.token_next_match(0, T.Keyword, 'HAVING')
having_index = tlist.token_index(having_token)
target_tokens = tlist.tokens[having_index + 1:] # process tokens after HAVING
elif tu.is_on(tlist):
on_token = tlist.token_next_match(0, T.Keyword, 'ON')
on_index = tlist.token_index(on_token)
target_tokens = tlist.tokens[on_index + 1:] # process tokens after ON
elif tu.is_connectby(tlist) or tu.is_startwith(tlist):
in_prior = tu.is_connectby(tlist) # consider PRIOR only in the CONNECT BY case
phrase_token = tlist.token_matching(0, (tu.is_phrase, ))
phrase_index = tlist.token_index(phrase_token)
target_tokens = tlist.tokens[phrase_index + | |
# guild/commands/run_impl.py
# Copyright 2017-2022 RStudio, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import six
from guild import batch_util
from guild import cli
from guild import click_util
from guild import cmd_impl_support
from guild import config
from guild import flag_util
from guild import guildfile
from guild import help as helplib
from guild import op as oplib
from guild import op_cmd as op_cmd_lib
from guild import op_dep
from guild import op_util
from guild import remote
from guild import resolver as resolverlib
from guild import run as runlib
from guild import run_util
from guild import summary
from guild import util
from guild import var
from guild import yaml_util
from . import remote_impl_support
log = logging.getLogger("guild")
# Use Bayesian with gaussian process as default optimizer when opdef
# does not contain any optimizers.
#
DEFAULT_OPTIMIZER = "gp"
DEFAULT_OBJECTIVE = "loss"
FLAG_TEST_ATTRS = [
"default",
"type",
"required",
"arg_name",
"arg_skip",
"arg_switch",
"arg_split",
"env_name",
"choices",
"allow_other",
"distribution",
"max",
"min",
"null_label",
]
RESPECIFIABLE_RUN_PARAMS = {
"gpus",
"label",
"tags",
}
CORE_RUN_ATTRS = {
"cmd",
"deps",
"env",
"exit_status",
"flags",
"host",
"id",
"initialized",
"label",
"op",
"pip_freeze",
"platform",
"random_seed",
"run_params",
"sourcecode_digest",
"started",
"stopped",
"user",
"user_flags",
"vcs_commit",
}
###################################################################
# State
###################################################################
class State:
def __init__(self, args):
self.args = args
self.restart_run = None
self.proto_run = None
self.user_op = Operation()
self.batch_op = None
class Operation(oplib.Operation):
def __init__(self):
super(Operation, self).__init__()
self._run = None
self._run_is_proto = False
self._force_sourcecode = False
self._opdef = None
self._resource_flagdefs = []
self._user_flag_vals = {}
self._batch_trials = None
self._op_flag_vals = {}
self._flag_null_labels = {}
self._op_cmd = None
self._op_cmd_run_attrs = {}
self._python_requires = None
self._random_seed = None
self._max_trials = None
self._objective = None
self._label_template = None
self._label = None
self._tags = []
self._comment = None
self._output_scalars = None
self._sourcecode_root = None
self._flags_extra = None
self._delete_on_success = None
self._additional_deps = []
def _state_for_args(args):
S = State(args)
if S.args.help_op:
_op_init_opdef(S.args.opspec, S.user_op, S.args)
else:
_state_init_restart_or_proto_run(S)
_state_init_user_op(S)
_state_init_batch_op(S)
return S
def _apply_run_params_for_args(args, attrs):
"""Applies values for 'run_params' to attrs given args.
This function handles cases where args are re-specified after
being initially specified. This occurs when a run is staged
(initial run params specification) and then started (run params
re-specification).
There are a limited number of params that can be
re-specified. These are defined in `RESPECIFIABLE_RUN_PARAMS`.
If any of these are re-specified, run params are
updated. Otherwise, run params are not updated if they are already
specified.
"""
params = attrs.setdefault("run_params", {})
for name, val in args.as_kw().items():
if name not in params:
params[name] = val
continue
if val and name in RESPECIFIABLE_RUN_PARAMS:
params[name] = val
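# Illustrative behavior (hypothetical values): if a staged run recorded
# run_params == {"label": "", "gpus": None, "stage": True} and the run is later
# started with --label retrain, only the re-specifiable "label" entry is
# replaced; params outside RESPECIFIABLE_RUN_PARAMS keep their recorded values
# once set.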
def _op_config_data(op):
return {
"flag-null-labels": op._flag_null_labels,
"op-cmd": op_cmd_lib.as_data(op._op_cmd),
"python-requires": op._python_requires,
"label-template": op._label_template,
"output-scalars": op._output_scalars,
"deps": op_util.op_deps_as_data(op.deps),
"sourcecode-root": op._sourcecode_root,
"flags-extra": op._flags_extra,
"delete-on-success": op._delete_on_success,
}
def _apply_op_config_data(data, op):
op._flag_null_labels = data.get("flag-null-labels")
op._op_cmd = op_cmd_lib.for_data(data.get("op-cmd"))
op._python_requires = data.get("python-requires")
op._label_template = data.get("label-template")
op._output_scalars = data.get("output-scalars")
op._sourcecode_root = data.get("sourcecode-root")
op._flags_extra = data.get("flags-extra")
op._delete_on_success = data.get("delete-on-success")
op.deps = op_util.op_deps_for_data(data.get("deps"))
# =================================================================
# State - restart / proto run
# =================================================================
def _state_init_restart_or_proto_run(S):
assert not (S.args.restart and S.args.proto)
if S.args.restart:
_state_init_restart_run(S)
elif S.args.proto:
_state_init_proto_run(S)
def _state_init_restart_run(S):
if S.args.remote:
S.restart_run = _remote_run_for_spec(S.args.restart, S.args)
else:
S.restart_run = _local_run_for_spec(S.args.restart)
_apply_restart_run_state(S)
def _state_init_proto_run(S):
if S.args.remote:
S.proto_run = _remote_run_for_spec(S.args.proto, S.args)
else:
S.proto_run = _local_run_for_spec(S.args.proto)
def _remote_run_for_spec(spec, args):
return remote_impl_support.one_run(spec, args)
def _local_run_for_spec(spec):
return util.find_apply(
[
run_util.run_for_run_dir,
run_util.marked_or_latest_run_for_opspec,
one_run,
],
spec,
)
def _apply_restart_run_state(S):
"""Applies any state from a restart run.
The only state applied from a restart run is the 'run_params'
attr. This is initialized to ensure that any original run params
are preserved from the original run spec (e.g. a staged run)
unless otherwise updated by the restart.
"""
op = S.batch_op if S.batch_op else S.user_op
assert op
assert not op.run_attrs, op.run_attrs
op.run_attrs["run_params"] = S.restart_run.get("run_params") or {}
# =================================================================
# State - user op
# =================================================================
def _state_init_user_op(S):
"""Initialize state for user op."""
_user_op_init_run(S)
_op_init_force_sourcecode(S.args.force_sourcecode, S.user_op)
_op_init_opdef(S.args.opspec, S.user_op, S.args)
_op_init_user_flags(S.args.flags, S.user_op)
_op_init_op_cmd(S.user_op, S.args)
_op_init_op_flags(S.args, S.user_op)
_op_init_config(
S.args.label,
S.args.tags,
S.args.comment,
S.args.edit_comment,
S.args.keep_run,
S.user_op,
)
_op_init_core(S.args, S.user_op)
def _user_op_init_run(S):
assert not (S.restart_run and S.proto_run)
if S.restart_run:
_user_op_init_run_(S, S.restart_run)
elif S.proto_run:
_user_op_init_run_(S, S.proto_run)
S.user_op._run_is_proto = True
def _user_op_init_run_(S, run):
if run.batch_proto:
S.user_op._run = run.batch_proto
else:
S.user_op._run = run
def _op_init_force_sourcecode(force_sourcecode_arg, op):
op._force_sourcecode = force_sourcecode_arg
# =================================================================
# Op - user flags
# =================================================================
def _op_init_user_flags(flag_args, op):
op._user_flag_vals, batch_files = split_flag_args(flag_args, op._opdef)
if batch_files:
trials = _trials_for_batch_files(batch_files)
if len(trials) == 1:
_apply_single_trial_user_flags(trials[0], op)
else:
op._batch_trials = trials
def split_flag_args(flag_args, opdef, incomplete=None, raise_parse_errors=True):
batch_files, rest_args = op_util.split_batch_files(flag_args)
assigns = _parse_assigns(rest_args, opdef, raise_parse_errors)
if incomplete:
assigns = {k: v for k, v in assigns.items() if incomplete in k}
return assigns, batch_files
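# Illustrative example (sketch; the exact parsed value types depend on
# op_util.parse_flag_assigns and the active opdef):
#
#   assigns, batch_files = split_flag_args(["lr=0.1", "@trials.csv"], opdef)
#   # assigns -> {"lr": 0.1}; batch_files holds the batch-file entries taken
#   # from the '@...' arguments, as returned by op_util.split_batch_files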
def _parse_assigns(assign_args, opdef, raise_parse_errors):
assigns, errors = op_util.parse_flag_assigns(assign_args, opdef)
if errors and raise_parse_errors:
_invalid_flag_arg_error(errors)
return assigns
def _trials_for_batch_files(batch_files):
batch_files = [_resolve_batch_file(path) for path in batch_files]
try:
return op_util.trials_for_batch_files(batch_files)
except op_util.BatchFileError as e:
_batch_file_error(e)
def _resolve_batch_file(path):
resolved = os.path.join(config.cwd(), os.path.expanduser(path))
if not os.path.exists(resolved):
_no_such_batch_file_error(resolved)
return resolved
def _apply_single_trial_user_flags(trial, op):
for name, val in trial.items():
if name not in op._user_flag_vals:
op._user_flag_vals[name] = val
# =================================================================
# Op - opdef
# =================================================================
def _op_init_opdef(opspec, op, args):
if opspec:
op._opdef = opdef_for_opspec(opspec)
elif op._run:
if args.flags or args.force_sourcecode:
# We need opdef for restart/run-with-proto when user specifies
# flag values or when force-sourcecode is specified.
op._opdef = _opdef_for_run(op._run)
else:
op._opdef = _default_opdef()
def _opdef_for_run(run):
if isinstance(run, remote.RunProxy):
return _opdef_for_remote_run(run)
opspec = run.opref.to_opspec()
return opdef_for_opspec(opspec, run)
def _opdef_for_remote_run(run):
if _cwd_remote_run(run):
return opdef_for_opspec(_cwd_opspec(run.opref))
return opdef_for_opspec(run.opref.to_opspec(), run)
def _cwd_remote_run(run):
try:
gf = guildfile.for_dir(config.cwd())
except Exception:
return False
else:
return gf.package and gf.package.name == run.opref.pkg_name
def _cwd_opspec(opref):
return "%s:%s" % (opref.model_name, opref.op_name)
def opdef_for_opspec(opspec, for_run=None):
try:
return op_util.opdef_for_opspec(opspec)
except op_util.InvalidOpSpec:
_invalid_opspec_error(opspec)
except op_util.CwdGuildfileError as e:
_guildfile_error(e.path, str(e))
except op_util.NoSuchModel:
if for_run:
_missing_run_opdef_error(opspec, for_run)
else:
_no_such_model_op_error(opspec)
except op_util.MultipleMatchingModels as e:
_multiple_models_error(e.model_ref, e.matches)
except op_util.NoSuchOperation as e:
_no_such_opdef_error(e.model, e.op_name)
except op_util.ModelOpProxyError as e:
_model_op_proxy_error(e)
def _default_opdef():
return opdef_for_opspec(None)
# =================================================================
# Op - op cmd
# =================================================================
def _op_init_op_cmd(op, args):
if op._opdef:
op._op_cmd, run_attrs = _op_cmd_for_opdef(op._opdef)
_apply_gpu_arg_env(args, op._op_cmd.cmd_env)
_apply_no_output_env(op._op_cmd.cmd_env)
if run_attrs:
op._op_cmd_run_attrs.update(run_attrs)
def _op_cmd_for_opdef(opdef):
try:
return op_util.op_cmd_for_opdef(opdef)
except op_util.InvalidOpDef as e:
_invalid_opdef_error(opdef, e.msg)
def _apply_no_output_env(op_env):
"""Applies op env 'NO_RUN_OUTPUT' to `os.environ` if defined.
Skipped if 'NO_RUN_OUTPUT' is defined in the current Guild env.
This lets the operation control whether or not run output is disabled.
We take this approach because Guild's only interface for disabling
run output is the current Guild env's 'NO_RUN_OUTPUT' value.
"""
if "NO_RUN_OUTPUT" in op_env and not "NO_RUN_OUTPUT" in os.environ:
os.environ["NO_RUN_OUTPUT"] = str(op_env["NO_RUN_OUTPUT"])
# =================================================================
# Op - op flags
# =================================================================
def _op_init_op_flags(args, op):
if op._run:
_apply_run_flags(op._run, op._op_flag_vals)
if op._opdef:
_apply_op_flag_vals_for_opdef(
op._opdef,
op._user_flag_vals,
args.force_flags or op._batch_trials,
op._op_cmd,
args,
op._resource_flagdefs,
op._op_flag_vals,
)
if args.edit_flags:
_edit_op_flags(op)
def _apply_run_flags(run, flag_vals):
flag_vals.update(run.get("flags") or {})
def _apply_op_flag_vals_for_opdef(
opdef,
user_flag_vals,
force_flags,
op_cmd,
args,
resource_flagdefs,
op_flag_vals,
):
"""Applies opdef and user-provided flags to `op_flag_vals`.
Also applies resolved resource flag defs per flag vals
`resource_flagdefs`.
Attempts to resolve operation runs and use resolve run short
IDs as applicable flag values.
Opdef is used to provide missing default values, coerce flag vals,
and validate vals. Opdef-provided flag vals are added to op flag
vals only if they are not already in op flags, or if they are in
user-provided flags. This maintains existing values (e.g. from a
restart) unless a user explicitly provides a flag value.
op_cmd is modified to include CmdFlag with arg-skip=yes for
resolved run IDs provided a flag isn't defined for the resolved
resource name. These flag values are used by Guild to resolve
resources and should not be included in flag args unless the a
flag def is explicitly provided.
"""
flag_vals, resolved_resource_flagdefs = _flag_vals_for_opdef(
opdef, user_flag_vals, force_flags
)
resource_flagdefs.extend(resolved_resource_flagdefs)
_apply_default_dep_runs(opdef, op_cmd, args, flag_vals)
for name, val in flag_vals.items():
if name in user_flag_vals or name not in op_flag_vals:
op_flag_vals[name] = val
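# Merge precedence illustrated with hypothetical values (ignoring dependency
# resolution for clarity): an existing op flag, e.g. restored from a restart,
# wins over an opdef default, but an explicit user value always wins.
#
#   op_flag_vals   = {"epochs": 5}               # carried over from restart
#   flag_vals      = {"epochs": 10, "lr": 0.1}   # opdef defaults + user vals
#   user_flag_vals = {"lr": 0.1}
#   # after the loop above: op_flag_vals == {"epochs": 5, "lr": 0.1}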
def _flag_vals_for_opdef(opdef, user_flag_vals, force_flags):
"""Returns flag vals for opdef.
Results includes defaults for opdef overridden by user flag vals
where specified.
"""
try:
return op_util.flag_vals_for_opdef(opdef, user_flag_vals, force_flags)
except op_util.MissingRequiredFlags as e:
_missing_required_flags_error(e)
except op_util.InvalidFlagChoice as e:
_invalid_flag_choice_error(e)
except op_util.InvalidFlagValue as e:
_invalid_flag_value_error(e)
except op_util.NoSuchFlagError as e:
_no_such_flag_error(e.flag_name, opdef)
def _apply_default_dep_runs(opdef, op_cmd, args, flag_vals):
"""Applies default run IDs to | |
_preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(NetworkLoadBalancerForwardingRule, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'network_load_balancer_id',
'forwarding_rule_id',
'network_load_balancer_forwarding_rule_properties',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_networkloadbalancers_forwardingrules_patch" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_networkloadbalancers_forwardingrules_patch`") # noqa: E501
# verify the required parameter 'network_load_balancer_id' is set
if self.api_client.client_side_validation and ('network_load_balancer_id' not in local_var_params or # noqa: E501
local_var_params['network_load_balancer_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `network_load_balancer_id` when calling `datacenters_networkloadbalancers_forwardingrules_patch`") # noqa: E501
# verify the required parameter 'forwarding_rule_id' is set
if self.api_client.client_side_validation and ('forwarding_rule_id' not in local_var_params or # noqa: E501
local_var_params['forwarding_rule_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `forwarding_rule_id` when calling `datacenters_networkloadbalancers_forwardingrules_patch`") # noqa: E501
# verify the required parameter 'network_load_balancer_forwarding_rule_properties' is set
if self.api_client.client_side_validation and ('network_load_balancer_forwarding_rule_properties' not in local_var_params or # noqa: E501
local_var_params['network_load_balancer_forwarding_rule_properties'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `network_load_balancer_forwarding_rule_properties` when calling `datacenters_networkloadbalancers_forwardingrules_patch`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] > 10: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_networkloadbalancers_forwardingrules_patch`, must be a value less than or equal to `10`") # noqa: E501
if self.api_client.client_side_validation and 'depth' in local_var_params and local_var_params['depth'] < 0: # noqa: E501
raise ApiValueError("Invalid value for parameter `depth` when calling `datacenters_networkloadbalancers_forwardingrules_patch`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'datacenter_id' in local_var_params:
path_params['datacenterId'] = local_var_params['datacenter_id'] # noqa: E501
if 'network_load_balancer_id' in local_var_params:
path_params['networkLoadBalancerId'] = local_var_params['network_load_balancer_id'] # noqa: E501
if 'forwarding_rule_id' in local_var_params:
path_params['forwardingRuleId'] = local_var_params['forwarding_rule_id'] # noqa: E501
query_params = list(local_var_params.get('query_params', {}).items())
if 'pretty' in local_var_params and local_var_params['pretty'] is not None: # noqa: E501
query_params.append(('pretty', local_var_params['pretty'])) # noqa: E501
if 'depth' in local_var_params and local_var_params['depth'] is not None: # noqa: E501
query_params.append(('depth', local_var_params['depth'])) # noqa: E501
header_params = {}
if 'x_contract_number' in local_var_params:
header_params['X-Contract-Number'] = local_var_params['x_contract_number'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'network_load_balancer_forwarding_rule_properties' in local_var_params:
body_params = local_var_params['network_load_balancer_forwarding_rule_properties']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic Authentication', 'Token Authentication'] # noqa: E501
response_type = 'NetworkLoadBalancerForwardingRule'
if 'response_type' in kwargs:
response_type = kwargs['response_type']
return self.api_client.call_api(
'/datacenters/{datacenterId}/networkloadbalancers/{networkLoadBalancerId}/forwardingrules/{forwardingRuleId}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=response_type, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
def datacenters_networkloadbalancers_forwardingrules_post(self, datacenter_id, network_load_balancer_id, network_load_balancer_forwarding_rule, **kwargs): # noqa: E501
"""Create NLB forwarding rules # noqa: E501
Create a forwarding rule for the specified Network Load Balancer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_networkloadbalancers_forwardingrules_post(datacenter_id, network_load_balancer_id, network_load_balancer_forwarding_rule, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param network_load_balancer_id: The unique ID of the Network Load Balancer. (required)
:type network_load_balancer_id: str
:param network_load_balancer_forwarding_rule: The forwarding rule to create. (required)
:type network_load_balancer_forwarding_rule: NetworkLoadBalancerForwardingRule
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: NetworkLoadBalancerForwardingRule
"""
kwargs['_return_http_data_only'] = True
return self.datacenters_networkloadbalancers_forwardingrules_post_with_http_info(datacenter_id, network_load_balancer_id, network_load_balancer_forwarding_rule, **kwargs) # noqa: E501
def datacenters_networkloadbalancers_forwardingrules_post_with_http_info(self, datacenter_id, network_load_balancer_id, network_load_balancer_forwarding_rule, **kwargs): # noqa: E501
"""Create NLB forwarding rules # noqa: E501
Create a forwarding rule for the specified Network Load Balancer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.datacenters_networkloadbalancers_forwardingrules_post_with_http_info(datacenter_id, network_load_balancer_id, network_load_balancer_forwarding_rule, async_req=True)
>>> result = thread.get()
:param datacenter_id: The unique ID of the data center. (required)
:type datacenter_id: str
:param network_load_balancer_id: The unique ID of the Network Load Balancer. (required)
:type network_load_balancer_id: str
:param network_load_balancer_forwarding_rule: The forwarding rule to create. (required)
:type network_load_balancer_forwarding_rule: NetworkLoadBalancerForwardingRule
:param pretty: Controls whether the response is pretty-printed (with indentations and new lines).
:type pretty: bool
:param depth: Controls the detail depth of the response objects. GET /datacenters/[ID] - depth=0: Only direct properties are included; children (servers and other elements) are not included. - depth=1: Direct properties and children references are included. - depth=2: Direct properties and children properties are included. - depth=3: Direct properties and children properties and children's children are included. - depth=... and so on
:type depth: int
:param x_contract_number: Users with multiple contracts must provide the contract number, for which all API requests are to be executed.
:type x_contract_number: int
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _return_http_data_only: return response data only, without the
status code and headers
:type _return_http_data_only: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:param _request_auth: set to override the auth_settings for a single
request; this effectively ignores the authentication
in the spec for a single request.
:type _request_auth: dict, optional
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: tuple(NetworkLoadBalancerForwardingRule, status_code(int), headers(HTTPHeaderDict))
"""
local_var_params = locals()
all_params = [
'datacenter_id',
'network_load_balancer_id',
'network_load_balancer_forwarding_rule',
'pretty',
'depth',
'x_contract_number'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth',
'response_type',
'query_params'
]
)
for local_var_params_key, local_var_params_val in six.iteritems(local_var_params['kwargs']):
if local_var_params_key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method datacenters_networkloadbalancers_forwardingrules_post" % local_var_params_key
)
local_var_params[local_var_params_key] = local_var_params_val
del local_var_params['kwargs']
# verify the required parameter 'datacenter_id' is set
if self.api_client.client_side_validation and ('datacenter_id' not in local_var_params or # noqa: E501
local_var_params['datacenter_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `datacenter_id` when calling `datacenters_networkloadbalancers_forwardingrules_post`") # noqa: E501
# verify the required parameter 'network_load_balancer_id' is set
if self.api_client.client_side_validation and ('network_load_balancer_id' not in local_var_params or # noqa: E501
local_var_params['network_load_balancer_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `network_load_balancer_id` when calling `datacenters_networkloadbalancers_forwardingrules_post`") # noqa: E501
# verify the required parameter 'network_load_balancer_forwarding_rule' is set
if self.api_client.client_side_validation and ('network_load_balancer_forwarding_rule' not in local_var_params or # noqa: E501
local_var_params['network_load_balancer_forwarding_rule'] is None): # noqa: E501
raise | |
# be trouble reconciling regional + entire extent quivers
lon_qp = model.X[::skip_interval].values
lat_qp = model.Y[::skip_interval].values
# minshaft=.2; scale=250
for idx, pressure in enumerate(model.uwnd_vwnd_pressure_lvls):
print(f'Currently on {pressure}hpa...')
fig, gs_qp = create_multisubplot_axes(optimal_k)
for cluster in range(optimal_k):
print(f"{utils.time_now()} - Cluster {cluster}: ")
# uwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
# target_ds_withClusterLabels.cluster==cluster, drop=True).uwnd.mean(
# "time")[::skip_interval, ::skip_interval].values
# vwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
# target_ds_withClusterLabels.cluster==cluster, drop=True).vwnd.mean(
# "time")[::skip_interval, ::skip_interval].values
uwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).uwnd.mean(
"time").values
vwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).vwnd.mean(
"time").values
ax_qp = fig.add_subplot(gs_qp[cluster], projection=ccrs.PlateCarree())
ax_qp.xaxis.set_major_formatter(model.lon_formatter)
ax_qp.yaxis.set_major_formatter(model.lat_formatter)
ax_qp.set_facecolor('white')
ax_qp.add_feature(cf.LAND,facecolor='white')
ax_qp.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
if cluster < model.grid_width: # top ticks
ax_qp.set_xticks([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], crs=ccrs.PlateCarree())
ax_qp.set_xticklabels([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], rotation=55)
ax_qp.xaxis.tick_top()
else: ax_qp.set_xticks([])
if cluster % model.grid_width == model.grid_width-1: # right-side ticks
ax_qp.set_yticks([model.LAT_S, (model.LAT_N - model.LAT_S)/2 + model.LAT_S, model.LAT_N], crs=ccrs.PlateCarree())
ax_qp.yaxis.set_label_position("right")
ax_qp.yaxis.tick_right()
else: ax_qp.set_yticks([])
if cluster == 0: # title
ax_qp.set_title(f"Pressure: {pressure} hpa,\ncluster no.{cluster+1}", loc='left')
else: ax_qp.set_title(f"cluster no.{cluster+1}", loc='left')
time.sleep(1); gc.collect()
# wndspd = np.hypot(vwnd_gridded_centroids,uwnd_gridded_centroids)
wndspd = np.hypot(vwnd_gridded_centroids,uwnd_gridded_centroids)[::skip_interval,::skip_interval]
time.sleep(1); gc.collect()
# u = uwnd_gridded_centroids/wndspd;
# v = vwnd_gridded_centroids/wndspd;
u = uwnd_gridded_centroids[::skip_interval,::skip_interval]/wndspd
v = vwnd_gridded_centroids[::skip_interval,::skip_interval]/wndspd
spd_plot = ax_qp.contourf(lon_qp, lat_qp, wndspd, np.linspace(0,18,19),
transform=ccrs.PlateCarree(), cmap='terrain_r',
alpha=1)
Quiver = ax_qp.quiver(lon_qp, lat_qp, u, v, color='Black', minshaft=minshaft, scale=scale)
conts = ax_qp.contour(spd_plot, 'w', linewidths=.3)
ax_qp.coastlines("50m", linewidth=coastline_lw, color='orangered')
ax_qp.add_feature(cf.BORDERS, linewidth=.35, color='orangered', linestyle='dashed')
ax_qp.clabel(conts, conts.levels, inline=True, fmt='%1.f', fontsize=5)
time.sleep(1); gc.collect()
if cluster == model.cbar_pos: # cbar
axins_qp = inset_axes(ax_qp, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_qp.transAxes)
cbar_qp = fig.colorbar(spd_plot, cax=axins_qp, label='Quiver (m/s)', orientation='horizontal',pad=0.01)
cbar_qp.ax.xaxis.set_ticks_position('top')
cbar_qp.ax.xaxis.set_label_position('top')
print(f"=> Quiver plots plotted for {pressure}hpa")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_qp_v5-at-{pressure}hpa_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
print(f"\n\nQuiver plotting took {utils.time_since(quiverstarttime)}.\n\n")
def print_quiver_ANOM_whole(model, dest, optimal_k):
quiverstarttime = timer(); print(f'{utils.time_now()} - Finishing quiver ANOMALY plots (whole)...')
target_ds_withClusterLabels = utils.open_pickle(model.target_ds_withClusterLabels_path)
target_ds_withClusterLabels = utils.remove_expver(target_ds_withClusterLabels)
area = (model.LON_E-model.LON_W)*(model.LAT_N-model.LAT_S)
coastline_lw = .8
minshaft=2; scale=33
if area > 3000: skip_interval=4
elif 2000 < area <= 3000: skip_interval=3; coastline_lw=.6
elif 500 < area <= 2000 : skip_interval=2; minshaft=3; scale=33
else: skip_interval=1; minshaft=3; scale=33
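# (The skip interval thins the quiver grid for large domains so arrows stay
# legible; e.g. a 60x60 degree domain has area 3600 > 3000 and is therefore
# plotted at every 4th grid point.)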
# lon_qp = model.X[::skip_interval].values
# lat_qp = model.Y[::skip_interval].values
lon = target_ds_withClusterLabels.lon[::skip_interval]
lat = target_ds_withClusterLabels.lat[::skip_interval]
w = lon.min().data
e = lon.max().data
s = lat.min().data
n = lat.max().data
levels = [int(i) for i in np.linspace(-10,10,21)]
for idx, pressure in enumerate(model.uwnd_vwnd_pressure_lvls):
print(f'Currently on {pressure}hpa...')
fig, gs_qp = create_multisubplot_axes(optimal_k)
# uwnd_baseline = target_ds_withClusterLabels.sel(level=pressure).uwnd.mean("time")[::skip_interval, ::skip_interval].values
# vwnd_baseline = target_ds_withClusterLabels.sel(level=pressure).vwnd.mean("time")[::skip_interval, ::skip_interval].values
uwnd_baseline = target_ds_withClusterLabels.sel(level=pressure).uwnd.mean("time").values
vwnd_baseline = target_ds_withClusterLabels.sel(level=pressure).vwnd.mean("time").values
for cluster in range(optimal_k):
print(f"{utils.time_now()} - Cluster {cluster}: ")
# uwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
# target_ds_withClusterLabels.cluster==cluster, drop=True).uwnd.mean(
# "time")[::skip_interval, ::skip_interval].values
# vwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
# target_ds_withClusterLabels.cluster==cluster, drop=True).vwnd.mean(
# "time")[::skip_interval, ::skip_interval].values
uwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).uwnd.mean(
"time").values
vwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).vwnd.mean(
"time").values
uwnd_mean = uwnd_gridded_centroids - uwnd_baseline
vwnd_mean = vwnd_gridded_centroids - vwnd_baseline
ax_qp = fig.add_subplot(gs_qp[cluster], projection=ccrs.PlateCarree())
ax_qp.xaxis.set_major_formatter(model.lon_formatter)
ax_qp.yaxis.set_major_formatter(model.lat_formatter)
ax_qp.set_facecolor('white')
ax_qp.add_feature(cf.LAND,facecolor='silver')
ax_qp.set_extent([w,e,s,n])
if cluster < model.grid_width: # top ticks
ax_qp.set_xticks(np.linspace(w,e, 5), crs=ccrs.PlateCarree())
ax_qp.set_xticklabels(np.linspace(w,e, 5), rotation=55)
ax_qp.xaxis.tick_top()
else: ax_qp.set_xticks([])
if cluster % model.grid_width == model.grid_width-1: # right-side ticks
ax_qp.set_yticks(np.linspace(s,n, 5), crs=ccrs.PlateCarree())
ax_qp.yaxis.set_label_position("right")
ax_qp.yaxis.tick_right()
else: ax_qp.set_yticks([])
if cluster == 0: # title
ax_qp.set_title(f"Pressure: {pressure} hpa for model of: {model.domain[0]}S {model.domain[1]}N {model.domain[2]}W {model.domain[3]}E\ncluster no.{cluster+1}", loc='left')
else: ax_qp.set_title(f"cluster no.{cluster+1}", loc='left')
time.sleep(1); gc.collect()
wndspd = np.hypot(vwnd_mean,uwnd_mean);
# wndspd = np.hypot(vwnd_gridded_centroids,uwnd_gridded_centroids)
# u = uwnd_gridded_centroids[::skip_interval,::skip_interval]/wndspd
# v = vwnd_gridded_centroids[::skip_interval,::skip_interval]/wndspd
u = uwnd_mean/wndspd;
v = vwnd_mean/wndspd;
wndspd = wndspd[::skip_interval,::skip_interval]
u = u[::skip_interval,::skip_interval]
v = v[::skip_interval,::skip_interval]
spd_plot = ax_qp.contourf(lon, lat, wndspd, levels,
transform=ccrs.PlateCarree(), cmap='terrain_r',
alpha=1)
Quiver = ax_qp.quiver(lon, lat, u, v, color='Black', minshaft=minshaft, scale=scale)
conts = ax_qp.contour(spd_plot, 'w', linewidths=.3)
ax_qp.coastlines("50m", linewidth=coastline_lw, color='orangered')
ax_qp.add_feature(cf.BORDERS, linewidth=.35, color='orangered', linestyle='dashed')
ax_qp.clabel(conts, conts.levels, inline=True, fmt='%1.f', fontsize=5)
time.sleep(1); gc.collect()
if cluster == model.cbar_pos: # cbar
axins_qp = inset_axes(ax_qp, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_qp.transAxes)
cbar_qp = fig.colorbar(spd_plot, cax=axins_qp, label='Quiver (m/s)', orientation='horizontal',pad=0.01,
ticks=levels)
cbar_qp.ax.xaxis.set_ticks_position('top')
cbar_qp.ax.xaxis.set_label_position('top')
print(f"=> Quiver ANOMALY plots plotted for {pressure}hpa")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_qp_v1_ANOM-at-{pressure}hpa_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
print(f"\n\nQuiver ANOMALY plotting took {utils.time_since(quiverstarttime)}.\n\n")
def print_quiver_plots_sgonly(model, dest, optimal_k):
quiverstarttime = timer(); print(f"{utils.time_now()} - Drawing quiver sub-plots (sgonly) now...")
target_ds_withClusterLabels = utils.open_pickle(model.target_ds_withClusterLabels_path)
target_ds_withClusterLabels = utils.remove_expver(target_ds_withClusterLabels)
w_lim_sg = 101
e_lim_sg = 107
s_lim_sg = -1
n_lim_sg = 4
target_ds_withClusterLabels = target_ds_withClusterLabels.sel(
lon=slice(w_lim_sg, e_lim_sg),lat=slice(n_lim_sg, s_lim_sg))
# area = (model.LON_E-model.LON_W)*(model.LAT_N-model.LAT_S)
coastline_lw = 1
# minshaft=2; scale=33
# if area > 3000: skip_interval=4
# elif 2000 < area <= 3000: skip_interval=3
# elif 500 < area <= 2000 : skip_interval=2; minshaft=3; scale=33
# else: skip_interval=1; minshaft=3; scale=33
skip_interval=1; minshaft=3; scale=10
lon_qp = target_ds_withClusterLabels.lon[::skip_interval].values
lat_qp = target_ds_withClusterLabels.lat[::skip_interval].values
# w = lon_qp.min()
# e = lon_qp.max()
# s = lat_qp.min()
# n = lat_qp.max()
w = 102
e = 105
s = 0.5
n = 2
for idx, pressure in enumerate(model.uwnd_vwnd_pressure_lvls):
print(f'Currently on {pressure}hpa...')
fig, gs_qp = create_multisubplot_axes(optimal_k)
for cluster in range(optimal_k):
print(f"{utils.time_now()} - Cluster {cluster}: ")
uwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).uwnd.mean(
"time")[::skip_interval, ::skip_interval].values
vwnd_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).vwnd.mean(
"time")[::skip_interval, ::skip_interval].values
ax_qp = fig.add_subplot(gs_qp[cluster], projection=ccrs.PlateCarree())
ax_qp.xaxis.set_major_formatter(model.lon_formatter)
ax_qp.yaxis.set_major_formatter(model.lat_formatter)
ax_qp.set_facecolor('white')
ax_qp.add_feature(cf.LAND,facecolor='silver')
# ax_qp.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
# ax_qp.set_extent([103, 105, 0.5, 2])
ax_qp.set_extent([w, e, s, n])
if cluster < model.grid_width: # top ticks
ax_qp.set_xticks([w,e], crs=ccrs.PlateCarree())
ax_qp.set_xticklabels([w,e], rotation=55)
ax_qp.xaxis.tick_top()
else: ax_qp.set_xticks([])
if cluster % model.grid_width == model.grid_width-1: # right-side ticks
ax_qp.set_yticks([s,n], crs=ccrs.PlateCarree())
ax_qp.yaxis.set_label_position("right")
ax_qp.yaxis.tick_right()
else: ax_qp.set_yticks([])
if cluster == 0: # title
ax_qp.set_title(f"Pressure: {pressure} hpa,\ncluster no.{cluster+1}", loc='left')
else: ax_qp.set_title(f"cluster no.{cluster+1}", loc='left')
time.sleep(1); gc.collect()
wndspd = np.hypot(vwnd_gridded_centroids,uwnd_gridded_centroids);
time.sleep(1); gc.collect()
u = uwnd_gridded_centroids/wndspd;
v = vwnd_gridded_centroids/wndspd;
spd_plot = ax_qp.contourf(lon_qp, lat_qp, wndspd, np.linspace(0,18,19),
transform=ccrs.PlateCarree(), cmap='terrain_r',
alpha=1)
Quiver = ax_qp.quiver(lon_qp, lat_qp, u, v, color='Black', minshaft=minshaft, scale=scale)
conts = ax_qp.contour(spd_plot, 'w', linewidths=.3)
ax_qp.coastlines("50m", linewidth=coastline_lw, color='aqua')
ax_qp.add_feature(cf.BORDERS, linewidth=.5, color='k', linestyle='dashed')
ax_qp.clabel(conts, conts.levels, inline=True, fmt='%1.f', fontsize=5)
time.sleep(1); gc.collect()
if cluster == model.cbar_pos: # cbar
axins_qp = inset_axes(ax_qp, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_qp.transAxes)
cbar_qp = fig.colorbar(spd_plot, cax=axins_qp, label='Quiver (m/s)', orientation='horizontal',pad=0.01)
cbar_qp.ax.xaxis.set_ticks_position('top')
cbar_qp.ax.xaxis.set_label_position('top')
print(f"=> Quiver plots plotted for {pressure}hpa")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_qp_sgonly-at-{pressure}hpa_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
print(f"\n\nQuiver plotting took {utils.time_since(quiverstarttime)}.\n\n")
def print_rhum_plots(model, dest, optimal_k):
rhumstarttime = timer(); print(f"{utils.time_now()} - Finishing RHUM plots...")
target_ds_withClusterLabels = utils.open_pickle(model.target_ds_withClusterLabels_path)
target_ds_withClusterLabels = utils.remove_expver(target_ds_withClusterLabels)
for idx, pressure in enumerate(model.rhum_pressure_levels):
fig, gs_rhum = create_multisubplot_axes(optimal_k)
for cluster in range(optimal_k):
rhum_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).rhum.mean("time")
ax_rhum = fig.add_subplot(gs_rhum[cluster], projection=ccrs.PlateCarree())
ax_rhum.xaxis.set_major_formatter(model.lon_formatter)
ax_rhum.yaxis.set_major_formatter(model.lat_formatter)
ax_rhum.coastlines("50m", linewidth=.7, color='w')
ax_rhum.add_feature(cf.BORDERS, linewidth=.5, color='w', linestyle='dashed')
ax_rhum.set_facecolor('white')
ax_rhum.add_feature(cf.LAND, facecolor='k')
ax_rhum.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
if cluster < model.grid_width: # top ticks
ax_rhum.set_xticks([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], crs=ccrs.PlateCarree())
ax_rhum.set_xticklabels([model.LON_W, (model.LON_E - model.LON_W)/2 + model.LON_W, model.LON_E], rotation=55)
ax_rhum.xaxis.tick_top()
else: ax_rhum.set_xticks([])
if cluster % model.grid_width == model.grid_width-1: # right-side ticks
ax_rhum.set_yticks([model.LAT_S, (model.LAT_N - model.LAT_S)/2 + model.LAT_S, model.LAT_N], crs=ccrs.PlateCarree())
ax_rhum.yaxis.set_label_position("right")
ax_rhum.yaxis.tick_right()
else: ax_rhum.set_yticks([])
if cluster == 0: # title
ax_rhum.set_title(f"Pressure: {pressure} hpa,\ncluster no.{cluster+1}", loc='left')
else: ax_rhum.set_title(f"cluster no.{cluster+1}", loc='left')
normi = mpl.colors.Normalize(vmin=model.min_maxes['rhum_min'], vmax=model.min_maxes['rhum_max']);
Rhum = ax_rhum.contourf(model.X, model.Y, rhum_gridded_centroids,
np.linspace(model.min_maxes['rhum_min'], model.min_maxes['rhum_max'], 21),
norm=normi, cmap='jet_r')
conts = ax_rhum.contour(Rhum, 'k:', linewidths=.5)
ax_rhum.clabel(conts, conts.levels, inline=True, fmt='%1.f', fontsize=10)
if cluster == model.cbar_pos: # cbar
axins_rhum = inset_axes(ax_rhum, width='100%', height='100%',
loc='lower left', bbox_to_anchor=(0, -.8, model.grid_width, .1),
bbox_transform=ax_rhum.transAxes);
cbar_rhum = fig.colorbar(Rhum, cax=axins_rhum, label='Relative humidity (%)', orientation='horizontal', pad=0.01);
cbar_rhum.ax.xaxis.set_ticks_position('top')
cbar_rhum.ax.xaxis.set_label_position('top')
print(f"{utils.time_now()} - clus {cluster}")
print(f"==> Rhum plots plotted for {pressure}hpa")
fig.subplots_adjust(wspace=0.05,hspace=0.3)
fn = f"{dest}/{model.month_names_joined}_rhum_v3-at-{pressure}hpa_{model.gridsize}x{model.gridsize}"
fig.savefig(fn, bbox_inches='tight', pad_inches=1)
print(f'file saved @:\n{fn}')
plt.close('all')
print(f"\n\nTime taken to plot RHUM: {utils.time_since(rhumstarttime)}.")
def print_rhum_plots_sgonly(model, dest, optimal_k):
rhumstarttime = timer(); print(f"{utils.time_now()} - Finishing RHUM plots...")
target_ds_withClusterLabels = utils.open_pickle(model.target_ds_withClusterLabels_path)
target_ds_withClusterLabels = utils.remove_expver(target_ds_withClusterLabels)
w_lim_sg = 101
e_lim_sg = 107
s_lim_sg = -1
n_lim_sg = 4
target_ds_withClusterLabels = target_ds_withClusterLabels.sel(
lon=slice(w_lim_sg, e_lim_sg),lat=slice(n_lim_sg, s_lim_sg))
w = 102
e = 105
s = 0.5
n = 2
for idx, pressure in enumerate(model.rhum_pressure_levels):
fig, gs_rhum = create_multisubplot_axes(optimal_k)
for cluster in range(optimal_k):
rhum_gridded_centroids = target_ds_withClusterLabels.sel(level=pressure).where(
target_ds_withClusterLabels.cluster==cluster, drop=True).rhum.mean("time")
ax_rhum = fig.add_subplot(gs_rhum[cluster], projection=ccrs.PlateCarree())
ax_rhum.xaxis.set_major_formatter(model.lon_formatter)
ax_rhum.yaxis.set_major_formatter(model.lat_formatter)
ax_rhum.coastlines("50m", linewidth=.7, color='w')
ax_rhum.add_feature(cf.BORDERS, linewidth=.5, color='w', linestyle='dashed')
ax_rhum.set_facecolor('white')
ax_rhum.add_feature(cf.LAND, facecolor='k')
# ax_rhum.set_extent([model.LON_W-1, model.LON_E+1, model.LAT_S-1, model.LAT_N+1])
ax_rhum.set_extent([w, e, s, n])
if cluster < model.grid_width: # top ticks
ax_rhum.set_xticks([w,e], crs=ccrs.PlateCarree())
ax_rhum.set_xticklabels([w,e], rotation=55)
ax_rhum.xaxis.tick_top()
else: ax_rhum.set_xticks([])
if cluster % model.grid_width == model.grid_width-1: # right-side ticks
ax_rhum.set_yticks([s,n], crs=ccrs.PlateCarree())
ax_rhum.yaxis.set_label_position("right")
ax_rhum.yaxis.tick_right()
else: ax_rhum.set_yticks([])
if cluster == 0: # title
ax_rhum.set_title(f"Pressure: {pressure} | |
# monitor_ed/ed_monitor.py
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 18:17:13 2021
@author: AndyWang
"""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from datetime import datetime
from time import sleep
import pandas as pd
import numpy as np
import os
import subprocess
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def _start_webdriver(browser, driverpath):
'''
Function to start a webdriver
Parameters
----------
browser : str
Type of the browser you want to open
driverpath : str
Path of the driver.
Returns
-------
selenium.webdriver
Webdriver object for further usage
'''
if browser.lower() == 'edge':
return webdriver.Edge(executable_path=driverpath)
elif browser.lower() == 'chrome':
return webdriver.Chrome(executable_path=driverpath)
else:
raise NotImplementedError(f'Code for {browser} is not implemented')
def _open_browser_cmd(port, cache_dir):
'''
Open Chrome in remote debugging mode
'''
chrome_cmd = f'chrome.exe --remote-debugging-port={port} --user-data-dir="{cache_dir}"'
subprocess.Popen(chrome_cmd)
def _connect_selenium(driverpath, port, cache_dir):
'''
Connect an already-open browser to Python via the remote debugging port
Returns
-------
driver: Selenium.webdriver object that is connected to your browser
'''
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", f"127.0.0.1:{port}")
driver = webdriver.Chrome(driverpath, options=chrome_options)
return driver
def _find_inputbox(driver, timeout=30):
'''
Find inputbox element in Ed analytics page
Parameters
----------
driver : selenium.webdriver
timeout : float/int, optional
Timeout limit for finding the element. The default is 30.
Raises
------
TimeoutError
Returns
-------
inputbox : selenium.webdriver.remote.webelement.WebElement
Input box for searching.
'''
tstart = datetime.now()
while True:
# break out the loop if
# 1) find the element successfully
# 2) reach the time limit
try:
inputbox = driver.find_element_by_tag_name('input')
return inputbox
except:
tnow = datetime.now()
if (tnow - tstart).total_seconds() > timeout:
raise TimeoutError('Check your connection!')
sleep(0.3)
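# Usage sketch (assumes a driver already pointed at the Ed analytics page):
# the helper simply polls until the search box renders or the timeout hits.
#
#   inputbox = _find_inputbox(driver, timeout=60)
#   _search_tut(inputbox, "T01A")   # "T01A" is a made-up tutorial code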
def _search_tut(inputbox, tutcode):
'''
Search for a tutorial in the Ed analytics page
Parameters
----------
inputbox : selenium.webdriver.remote.webelement.WebElement
Web element for the search input box
tutcode : str
Tutorial code to search for.
Returns
-------
None.
'''
inputbox.clear()
inputbox.send_keys(tutcode)
def _get_header_use(thtag):
'''
Get header attribute from usetag
Parameters
----------
thtag : bs4.element.Tag
Table header tag.
Returns
-------
str
header attribute.
'''
usetag = thtag.findAll('use')
if len(usetag) == 0:
return '#'
return usetag[0].attrs['xlink:href']
def _get_tdstatus(tdtag):
'''
Get table cell content or status (for questions)
Parameters
----------
tdtag : bs4.element.Tag
table cell tag.
Returns
-------
str
table cell content or status.
'''
text = tdtag.text
if text:
if text != '\u200b':
return text
if 'class' in tdtag.attrs:
cellclass = tdtag.attrs['class']
if len(cellclass) > 1:
return cellclass[1].split('-')[-1]
return ''
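# Example of the fallback order above (hypothetical cells):
#   <td>95%</td>                          -> '95%'       (visible text wins)
#   <td class="cell cell-attempted"></td> -> 'attempted' (status from 2nd CSS class)
#   <td class="cell"></td>                -> ''          (no text, single class)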
def _get_tdlink(tdtag):
atags = tdtag.findAll('a')
if len(atags) > 0:
return 'https://edstem.org{}'.format(atags[0].attrs['href'])
return 'N/A'
def _get_analytics_table(driver):
'''
Get analytics table from driver
Parameters
----------
driver : selenium.webdriver
Driver that opens Ed analytics page.
Returns
-------
analytics_df : pandas.DataFrame
DataFrame for analytics table.
colattrs : list
A list of each column's header attribute.
'''
soup = BeautifulSoup(driver.page_source, 'lxml')
table = soup.findAll('table', attrs={'class':"lesson-analytics-table"})[0]
### get header and body tag
thead = table.findAll('thead')[0]
tbody = table.findAll('tbody')[0]
### extract info from html to list
### (Note: pandas.read_html doesn't work for this case)
# header
header = []
colattrs = []
for thtag in thead.findAll('th'):
header.append(thtag.text.strip())
colattrs.append(_get_header_use(thtag))
# body
tablecells = []
tablehtmls = []
trtags = tbody.findAll('tr')
for trtag in trtags:
rowcells = []
rowhtmls = []
tdtags = trtag.findAll('td')
for tdtag in tdtags:
rowcells.append(_get_tdstatus(tdtag))
rowhtmls.append(_get_tdlink(tdtag))
tablecells.append(rowcells)
tablehtmls.append(rowhtmls)
analytics_df = pd.DataFrame(tablecells, columns=header)
analytics_html = pd.DataFrame(tablehtmls, columns=header)
return analytics_df, analytics_html, colattrs
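# Usage sketch: the two DataFrames line up cell-for-cell, so a cell's status in
# `analytics_df` and its workspace link in `analytics_html` share the same
# row/column position.
#
#   analytics_df, analytics_html, colattrs = _get_analytics_table(driver)
#   analytics_df.iloc[0, 0]    # first student's name
#   analytics_html.iloc[0, 3]  # e.g. 'https://edstem.org/...' or 'N/A'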
def _check_search_loaded(driver, tutcode):
df, _, _ = _get_analytics_table(driver)
tutcol = df['Tutorial'].apply(lambda x:x.lower())
if (tutcol != tutcode.lower()).sum() > 0:
return False
return True
def _get_online_students(analytics_df):
'''
Get students that are online
'''
opened_count = (analytics_df.iloc[:, 3:] != 'unopened').sum(axis=1)
return opened_count > 0
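# Sketch of the heuristic above: a student counts as "online" if at least one
# slide column (from the 4th column on) is anything other than 'unopened'.
#
#   (analytics_df.iloc[:, 3:] != 'unopened').sum(axis=1) > 0   # boolean Series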
def _get_code_cols(colattrs):
'''
Get columns for code only
'''
code_check = []
for attr in colattrs:
if attr == '#lesson-slide-code' or attr == '#lesson-slide-postgres':
code_check.append(True)
else:
code_check.append(False)
return code_check
def _prepare_code_plotting(analytics_df, colattrs):
good_stu = _get_online_students(analytics_df)
code_check = _get_code_cols(colattrs)
cleaned_df = analytics_df.loc[good_stu, code_check]
### preparing statistics
### We use .iloc here to avoid same question in one week
stats = {'completed':[],
'attempted':[],
'opened':[],
'unopened':[],
}
for colidx in range(cleaned_df.shape[1]):
colseries = cleaned_df.iloc[:,colidx]
for status in stats:
stats[status].append((colseries == status).sum())
colnames = cleaned_df.columns.tolist()
### return values
return stats, colnames
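# The returned `stats` dict is shaped for stacked bar plotting, e.g.
# (illustrative numbers for three code slides and six online students):
#
#   stats    = {'completed': [5, 2, 0], 'attempted': [1, 3, 0],
#               'opened': [0, 1, 2], 'unopened': [0, 0, 4]}
#   colnames = ['Q1', 'Q2', 'Q3']
#   _plot_code_status(stats, colnames)   # writes Class_status.png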
def _plot_code_status(stats, colnames):
fig = plt.figure(figsize=(12, len(colnames)/2))
ax = fig.add_subplot(111)
ypos = range(len(colnames),0,-1)
left = np.zeros(len(colnames))
statuses = ['completed', 'attempted', 'opened', 'unopened']
barcolor = {'completed':'green',
'attempted':'orange',
'opened':'yellow',
'unopened':'white'
}
for status in statuses:
ax.barh(ypos, stats[status], left=left,
color=barcolor[status],
label=status,
edgecolor='black'
)
left = np.add(left, stats[status])
ax.set_yticks(ypos)
ax.set_yticklabels(colnames, fontsize=15)
ax.set_ylim(0.5, len(colnames)+0.5)
xlim_max = 5 * ((int(left[0]) // 5) + 1)
ax.set_xticks(range(0, xlim_max+1, 5))
ax.set_xlim(0, xlim_max)
ax.grid(axis='x', linestyle='--')
fig.savefig('Class_status.png', bbox_inches='tight', dpi=100)
plt.close()
### for printing
def _get_value_rowcol(df, value):
rowcols = []
for i in range(df.shape[0]):
for j in range(df.shape[1]):
if df.iloc[i, j] == value:
rowcols.append((i, j))
return rowcols
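# Tiny example: find the (row, col) positions of every 'attempted' cell.
#
#   df = pd.DataFrame([['completed', 'attempted'], ['attempted', 'unopened']])
#   _get_value_rowcol(df, 'attempted')   # -> [(0, 1), (1, 0)]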
def _print_new_attempted(analytics_df, analytics_html, rowcols):
print('NEW ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} attempted {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _print_gone_attempted(analytics_df, rowcols):
print('THESE ATTEMPTS ARE SOLVED'.center(70, '*'))
for row, col in rowcols:
print('{} finished {}!'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col]))
print('*'*70)
def _print_old_attempted(analytics_df, analytics_html, rowcols):
print('OLD ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} is still trying {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _compare_analytics_dfs(analytics_df, analytics_html, oldpath='./old_analytics_df.pickle'):
if not os.path.exists(oldpath):
rowcols = _get_value_rowcol(analytics_df, 'attempted')
_print_gone_attempted(analytics_df, [])
_print_old_attempted(analytics_df, analytics_html, [])
_print_new_attempted(analytics_df, analytics_html, rowcols)
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### attempts gone
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
_print_gone_attempted(analytics_df, rowcols)
### old attempts
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
_print_old_attempted(analytics_df, analytics_html, rowcols)
### new attempts
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
_print_new_attempted(analytics_df, analytics_html, rowcols)
analytics_df.to_pickle(oldpath)
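# The boolean-mask logic above, spelled out with hypothetical cells:
#   old == 'attempted', new == 'completed' -> oldatttab & changetab => attempt solved
#   old == 'attempted', new == 'attempted' -> oldatttab & newatttab => old attempt
#   old == 'unopened',  new == 'attempted' -> newatttab & changetab => new attempt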
def _get_html_table(analytics_df, analytics_html, rowcols):
html_table = []
for row, col in rowcols:
name = analytics_df.iloc[row, 0]
question_name = analytics_df.columns[col]
url = analytics_html.iloc[row, col]
url = f'<a href="{url}" target="_blank">{url}</a>'
html_table.append([name, question_name, url])
return pd.DataFrame(html_table, columns=['NAME', 'QUESTION', 'WORKSPACE'])
def _make_html(analytics_df, analytics_html, oldpath):
html_content = ''
time_update = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
### The basic information for the course
tut_info = f'''<h2>TUTCODE {TUTCODE} UPDATED @ {time_update}</h2><hr>\n'''
html_content += tut_info
# if there is no old pickle
if not os.path.exists(oldpath):
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
rowcols = _get_value_rowcol(analytics_df, 'attempted')
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
html_content += '<p> no old attempts solved</p>\n'
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
if len(rowcols) != 0:
oldatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += oldatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
if len(rowcols) != 0:
goneatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += goneatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts solved</p>\n'
html_content += '<hr>\n'
html_content += '<h3>CLASS MONITORING</h3>\n'
html_content += '<a href="./Class_status.png"><img src="Class_status.png" width="1000"><\a>'
with open('monitor.html', 'w', encoding='utf-8') as fp:
fp.write(html_content)
def _check_login(driver):
if 'Log in to continue' in driver.page_source:
return True
return False
def _manually_check():
### read settings
with open('./setup.py') as fp:
code = fp.read()
exec(code, globals())
if os.path.exists(OLDPICKLEPATH):
os.remove(OLDPICKLEPATH)
### start!
if not OPEN_WITH_CACHE:
driver = _start_webdriver(BROWSER, DRIVERPATH)
elif BROWSER.lower() == 'chrome':
_open_browser_cmd(PORT, CACHE_DIR)
driver = _connect_selenium(DRIVERPATH, PORT, CACHE_DIR)
else:
raise NotImplementedError('NOT IMPLEMENTED')
driver.get(EDURL)
wait = input('Press Enter once the webpage has loaded!')
while _check_login(driver):
status_code = input('Please log in to Ed first!!!'.center(70, '+'))
print(f'The Tutorial Code is {TUTCODE}')
# tutnew = input("Input the new TUTCODE if it is not correct, or press enter")
# if tutnew:
# TUTCODE = tutnew
### starting the loop!
break_sign = ''
while break_sign != 'q':
driver.refresh()
inputbox = _find_inputbox(driver)
_search_tut(inputbox, TUTCODE)
### get analytics dataframe
while not _check_search_loaded(driver, TUTCODE):
sleep(0.3)
analytics_df, analytics_html, colattrs = _get_analytics_table(driver)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import shutil
import tempfile
import eventlet
import fixtures
from ironicclient import exceptions
import mock
from oslo_config import cfg
from oslo_serialization import base64
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic_inspector.common import ironic as ir_utils
from ironic_inspector import db
from ironic_inspector import introspection_state as istate
from ironic_inspector import node_cache
from ironic_inspector.plugins import base as plugins_base
from ironic_inspector.plugins import example as example_plugin
from ironic_inspector import process
from ironic_inspector.pxe_filter import base as pxe_filter
from ironic_inspector.test import base as test_base
from ironic_inspector import utils
CONF = cfg.CONF
class BaseTest(test_base.NodeTest):
def setUp(self):
super(BaseTest, self).setUp()
self.started_at = timeutils.utcnow()
self.all_ports = [mock.Mock(uuid=uuidutils.generate_uuid(),
address=mac) for mac in self.macs]
self.ports = [self.all_ports[1]]
self.fake_result_json = 'node json'
self.cli_fixture = self.useFixture(
fixtures.MockPatchObject(ir_utils, 'get_client', autospec=True))
self.cli = self.cli_fixture.mock.return_value
class BaseProcessTest(BaseTest):
def setUp(self):
super(BaseProcessTest, self).setUp()
self.cache_fixture = self.useFixture(
fixtures.MockPatchObject(node_cache, 'find_node', autospec=True))
self.process_fixture = self.useFixture(
fixtures.MockPatchObject(process, '_process_node', autospec=True))
self.find_mock = self.cache_fixture.mock
self.node_info = node_cache.NodeInfo(
uuid=self.node.uuid,
state=istate.States.waiting,
started_at=self.started_at)
self.node_info.finished = mock.Mock()
self.find_mock.return_value = self.node_info
self.cli.node.get.return_value = self.node
self.process_mock = self.process_fixture.mock
self.process_mock.return_value = self.fake_result_json
self.addCleanup(self._cleanup_lock, self.node_info)
def _cleanup_lock(self, node_info):
if node_info._locked:
node_info.release_lock()
class TestProcess(BaseProcessTest):
def test_ok(self):
res = process.process(self.data)
self.assertEqual(self.fake_result_json, res)
self.find_mock.assert_called_once_with(bmc_address=self.bmc_address,
mac=mock.ANY)
actual_macs = self.find_mock.call_args[1]['mac']
self.assertEqual(sorted(self.all_macs), sorted(actual_macs))
self.cli.node.get.assert_called_once_with(self.uuid)
self.process_mock.assert_called_once_with(
self.node_info, self.node, self.data)
def test_no_ipmi(self):
del self.inventory['bmc_address']
process.process(self.data)
self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY)
actual_macs = self.find_mock.call_args[1]['mac']
self.assertEqual(sorted(self.all_macs), sorted(actual_macs))
self.cli.node.get.assert_called_once_with(self.uuid)
self.process_mock.assert_called_once_with(self.node_info, self.node,
self.data)
def test_ipmi_not_detected(self):
self.inventory['bmc_address'] = '0.0.0.0'
process.process(self.data)
self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY)
actual_macs = self.find_mock.call_args[1]['mac']
self.assertEqual(sorted(self.all_macs), sorted(actual_macs))
self.cli.node.get.assert_called_once_with(self.uuid)
self.process_mock.assert_called_once_with(self.node_info, self.node,
self.data)
def test_ipmi_not_detected_with_old_field(self):
self.inventory['bmc_address'] = '0.0.0.0'
self.data['ipmi_address'] = '0.0.0.0'
process.process(self.data)
self.find_mock.assert_called_once_with(bmc_address=None, mac=mock.ANY)
actual_macs = self.find_mock.call_args[1]['mac']
self.assertEqual(sorted(self.all_macs), sorted(actual_macs))
self.cli.node.get.assert_called_once_with(self.uuid)
self.process_mock.assert_called_once_with(self.node_info, self.node,
self.data)
def test_not_found_in_cache(self):
self.find_mock.side_effect = utils.Error('not found')
self.assertRaisesRegex(utils.Error,
'not found',
process.process, self.data)
self.assertFalse(self.cli.node.get.called)
self.assertFalse(self.process_mock.called)
def test_not_found_in_ironic(self):
self.cli.node.get.side_effect = exceptions.NotFound()
self.assertRaisesRegex(utils.Error,
'Node %s was not found' % self.uuid,
process.process, self.data)
self.cli.node.get.assert_called_once_with(self.uuid)
self.assertFalse(self.process_mock.called)
self.node_info.finished.assert_called_once_with(
istate.Events.error, error=mock.ANY)
def test_already_finished(self):
self.node_info.finished_at = timeutils.utcnow()
self.assertRaisesRegex(utils.Error, 'already finished',
process.process, self.data)
self.assertFalse(self.process_mock.called)
self.assertFalse(self.find_mock.return_value.finished.called)
def test_expected_exception(self):
self.process_mock.side_effect = utils.Error('boom')
self.assertRaisesRegex(utils.Error, 'boom',
process.process, self.data)
self.node_info.finished.assert_called_once_with(
istate.Events.error, error='boom')
def test_unexpected_exception(self):
self.process_mock.side_effect = RuntimeError('boom')
with self.assertRaisesRegex(utils.Error,
'Unexpected exception') as ctx:
process.process(self.data)
self.assertEqual(500, ctx.exception.http_code)
self.node_info.finished.assert_called_once_with(
istate.Events.error,
error='Unexpected exception RuntimeError during processing: boom')
def test_hook_unexpected_exceptions(self):
for ext in plugins_base.processing_hooks_manager():
patcher = mock.patch.object(ext.obj, 'before_processing',
side_effect=RuntimeError('boom'))
patcher.start()
self.addCleanup(lambda p=patcher: p.stop())
self.assertRaisesRegex(utils.Error, 'Unexpected exception',
process.process, self.data)
self.node_info.finished.assert_called_once_with(
istate.Events.error, error=mock.ANY)
error_message = self.node_info.finished.call_args[1]['error']
self.assertIn('RuntimeError', error_message)
self.assertIn('boom', error_message)
def test_hook_unexpected_exceptions_no_node(self):
# Check that error from hooks is raised, not "not found"
self.find_mock.side_effect = utils.Error('not found')
for ext in plugins_base.processing_hooks_manager():
patcher = mock.patch.object(ext.obj, 'before_processing',
side_effect=RuntimeError('boom'))
patcher.start()
self.addCleanup(lambda p=patcher: p.stop())
self.assertRaisesRegex(utils.Error, 'Unexpected exception',
process.process, self.data)
self.assertFalse(self.node_info.finished.called)
def test_error_if_node_not_found_hook(self):
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
self.assertRaisesRegex(utils.Error,
'Look up error: BOOM',
process.process, self.data)
@mock.patch.object(example_plugin, 'example_not_found_hook',
autospec=True)
class TestNodeNotFoundHook(BaseProcessTest):
def test_node_not_found_hook_run_ok(self, hook_mock):
CONF.set_override('node_not_found_hook', 'example', 'processing')
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
hook_mock.return_value = node_cache.NodeInfo(
uuid=self.node.uuid,
started_at=self.started_at)
res = process.process(self.data)
self.assertEqual(self.fake_result_json, res)
hook_mock.assert_called_once_with(self.data)
def test_node_not_found_hook_run_none(self, hook_mock):
CONF.set_override('node_not_found_hook', 'example', 'processing')
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
hook_mock.return_value = None
self.assertRaisesRegex(utils.Error,
'Node not found hook returned nothing',
process.process, self.data)
hook_mock.assert_called_once_with(self.data)
def test_node_not_found_hook_exception(self, hook_mock):
CONF.set_override('node_not_found_hook', 'example', 'processing')
plugins_base._NOT_FOUND_HOOK_MGR = None
self.find_mock.side_effect = utils.NotFoundInCacheError('BOOM')
hook_mock.side_effect = Exception('Hook Error')
self.assertRaisesRegex(utils.Error,
'Node not found hook failed: Hook Error',
process.process, self.data)
hook_mock.assert_called_once_with(self.data)
class TestUnprocessedData(BaseProcessTest):
@mock.patch.object(process, '_store_unprocessed_data', autospec=True)
def test_save_unprocessed_data(self, store_mock):
CONF.set_override('store_data', 'swift', 'processing')
expected = copy.deepcopy(self.data)
process.process(self.data)
store_mock.assert_called_once_with(mock.ANY, expected)
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_save_unprocessed_data_failure(self, swift_mock):
CONF.set_override('store_data', 'swift', 'processing')
name = 'inspector_data-%s-%s' % (
self.uuid,
process._UNPROCESSED_DATA_STORE_SUFFIX
)
swift_conn = swift_mock.return_value
swift_conn.create_object.side_effect = utils.Error('Oops')
res = process.process(self.data)
# assert store failure doesn't break processing
self.assertEqual(self.fake_result_json, res)
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_processing',
autospec=True)
class TestStoreLogs(BaseProcessTest):
def setUp(self):
super(TestStoreLogs, self).setUp()
CONF.set_override('processing_hooks', 'ramdisk_error,example',
'processing')
self.tempdir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(self.tempdir))
CONF.set_override('ramdisk_logs_dir', self.tempdir, 'processing')
self.logs = b'test logs'
self.data['logs'] = base64.encode_as_bytes(self.logs)
def _check_contents(self, name=None):
files = os.listdir(self.tempdir)
self.assertEqual(1, len(files))
filename = files[0]
if name is None:
self.assertTrue(filename.startswith(self.uuid),
'%s does not start with uuid' % filename)
else:
self.assertEqual(name, filename)
with open(os.path.join(self.tempdir, filename), 'rb') as fp:
self.assertEqual(self.logs, fp.read())
def test_store_on_preprocess_failure(self, hook_mock):
hook_mock.side_effect = Exception('Hook Error')
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_store_on_process_failure(self, hook_mock):
self.process_mock.side_effect = utils.Error('boom')
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_store_on_unexpected_process_failure(self, hook_mock):
self.process_mock.side_effect = RuntimeError('boom')
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_store_on_ramdisk_error(self, hook_mock):
self.data['error'] = 'boom'
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_store_find_node_error(self, hook_mock):
self.cli.node.get.side_effect = exceptions.NotFound('boom')
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_no_error_no_logs(self, hook_mock):
process.process(self.data)
self.assertEqual([], os.listdir(self.tempdir))
def test_logs_disabled(self, hook_mock):
CONF.set_override('ramdisk_logs_dir', None, 'processing')
hook_mock.side_effect = Exception('Hook Error')
self.assertRaises(utils.Error, process.process, self.data)
self.assertEqual([], os.listdir(self.tempdir))
def test_always_store_logs(self, hook_mock):
CONF.set_override('always_store_ramdisk_logs', True, 'processing')
process.process(self.data)
self._check_contents()
@mock.patch.object(os, 'makedirs', autospec=True)
@mock.patch.object(process.LOG, 'exception', autospec=True)
def test_failure_to_write(self, log_mock, makedirs_mock, hook_mock):
tempdir = tempfile.mkdtemp()
logs_dir = os.path.join(tempdir, 'I/never/exist')
CONF.set_override('always_store_ramdisk_logs', True, 'processing')
CONF.set_override('ramdisk_logs_dir', logs_dir, 'processing')
makedirs_mock.side_effect = OSError()
process.process(self.data)
os.rmdir(tempdir)
self.assertEqual([], os.listdir(self.tempdir))
self.assertTrue(makedirs_mock.called)
self.assertTrue(log_mock.called)
def test_directory_is_created(self, hook_mock):
shutil.rmtree(self.tempdir)
self.data['error'] = 'boom'
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents()
def test_store_custom_name(self, hook_mock):
CONF.set_override('ramdisk_logs_filename_format',
'{uuid}-{bmc}-{mac}',
'processing')
self.process_mock.side_effect = utils.Error('boom')
self.assertRaises(utils.Error, process.process, self.data)
self._check_contents(name='%s-%s-%s' % (self.uuid,
self.bmc_address,
self.pxe_mac.replace(':', '')))
class TestProcessNode(BaseTest):
def setUp(self):
super(TestProcessNode, self).setUp()
CONF.set_override('processing_hooks',
'$processing.default_processing_hooks,example',
'processing')
self.validate_attempts = 5
self.data['macs'] = self.macs # validate_interfaces hook
self.valid_interfaces['eth3'] = {
'mac': self.macs[1], 'ip': self.ips[1], 'extra': {}, 'pxe': False
}
self.data['interfaces'] = self.valid_interfaces
self.ports = self.all_ports
self.cli.node.get_boot_device.side_effect = (
[RuntimeError()] * self.validate_attempts + [None])
self.cli.port.create.side_effect = self.ports
self.cli.node.update.return_value = self.node
self.cli.node.list_ports.return_value = []
self.useFixture(fixtures.MockPatchObject(
pxe_filter, 'driver', autospec=True))
self.useFixture(fixtures.MockPatchObject(
eventlet.greenthread, 'sleep', autospec=True))
self.node_info._state = istate.States.waiting
db.Node(uuid=self.node_info.uuid, state=self.node_info._state,
started_at=self.node_info.started_at,
finished_at=self.node_info.finished_at,
error=self.node_info.error).save(self.session)
def test_return_includes_uuid(self):
ret_val = process._process_node(self.node_info, self.node, self.data)
self.assertEqual(self.uuid, ret_val.get('uuid'))
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update')
def test_wrong_provision_state(self, post_hook_mock):
self.node.provision_state = 'active'
self.assertRaises(utils.Error, process._process_node,
self.node_info, self.node, self.data)
self.assertFalse(post_hook_mock.called)
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update')
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_ok(self, finished_mock, post_hook_mock):
process._process_node(self.node_info, self.node, self.data)
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0],
extra={},
pxe_enabled=True)
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1],
extra={},
pxe_enabled=False)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
self.assertFalse(self.cli.node.validate.called)
post_hook_mock.assert_called_once_with(self.data, self.node_info)
finished_mock.assert_called_once_with(mock.ANY, istate.Events.finish)
def test_port_failed(self):
self.cli.port.create.side_effect = (
[exceptions.Conflict()] + self.ports[1:])
process._process_node(self.node_info, self.node, self.data)
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[0],
extra={}, pxe_enabled=True)
self.cli.port.create.assert_any_call(node_uuid=self.uuid,
address=self.macs[1],
extra={}, pxe_enabled=False)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_power_off_failed(self, finished_mock):
self.cli.node.set_power_state.side_effect = RuntimeError('boom')
process._process_node(self.node_info, self.node, self.data)
self.cli.node.set_power_state.assert_called_once_with(self.uuid, 'off')
finished_mock.assert_called_once_with(
mock.ANY, istate.Events.error,
error='Failed to power off node %s, check its power '
'management configuration: boom' % self.uuid
)
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update')
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_power_off_enroll_state(self, finished_mock, post_hook_mock):
self.node.provision_state = 'enroll'
self.node_info.node = mock.Mock(return_value=self.node)
process._process_node(self.node_info, self.node, self.data)
self.assertTrue(post_hook_mock.called)
self.assertTrue(self.cli.node.set_power_state.called)
finished_mock.assert_called_once_with(
self.node_info, istate.Events.finish)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
def test_no_power_off(self, finished_mock):
CONF.set_override('power_off', False, 'processing')
process._process_node(self.node_info, self.node, self.data)
self.assertFalse(self.cli.node.set_power_state.called)
finished_mock.assert_called_once_with(
self.node_info, istate.Events.finish)
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_store_data(self, swift_mock):
CONF.set_override('store_data', 'swift', 'processing')
swift_conn = swift_mock.return_value
name = 'inspector_data-%s' % self.uuid
expected = self.data
process._process_node(self.node_info, self.node, self.data)
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertEqual(expected,
json.loads(swift_conn.create_object.call_args[0][1]))
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_store_data_no_logs(self, swift_mock):
CONF.set_override('store_data', 'swift', 'processing')
swift_conn = swift_mock.return_value
name = 'inspector_data-%s' % self.uuid
self.data['logs'] = 'something'
process._process_node(self.node_info, self.node, self.data)
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertNotIn('logs',
json.loads(swift_conn.create_object.call_args[0][1]))
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
def test_store_data_location(self, swift_mock):
CONF.set_override('store_data', 'swift', 'processing')
CONF.set_override('store_data_location', 'inspector_data_object',
'processing')
swift_conn = swift_mock.return_value
name = 'inspector_data-%s' % self.uuid
patch = [{'path': '/extra/inspector_data_object',
'value': name, 'op': 'add'}]
expected = self.data
process._process_node(self.node_info, self.node, self.data)
swift_conn.create_object.assert_called_once_with(name, mock.ANY)
self.assertEqual(expected,
json.loads(swift_conn.create_object.call_args[0][1]))
self.cli.node.update.assert_any_call(self.uuid, patch)
@mock.patch.object(process, '_reapply', autospec=True)
@mock.patch.object(node_cache, 'get_node', autospec=True)
class TestReapply(BaseTest):
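# Note on prepare_mocks below: it is a test-local decorator that replaces
# node_cache.get_node's return value with a fresh NodeInfo whose finished() and
# acquire_lock() are plain mocks, so each test starts from a clean, lockable node.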
def prepare_mocks(func):
@six.wraps(func)
def wrapper(self, pop_mock, *args, **kw):
pop_mock.return_value = node_cache.NodeInfo(
uuid=self.node.uuid,
started_at=self.started_at)
pop_mock.return_value.finished = mock.Mock()
pop_mock.return_value.acquire_lock = mock.Mock()
return func(self, pop_mock, *args, **kw)
return wrapper
def setUp(self):
super(TestReapply, self).setUp()
CONF.set_override('store_data', 'swift', 'processing')
@prepare_mocks
def test_ok(self, pop_mock, reapply_mock):
process.reapply(self.uuid)
pop_mock.assert_called_once_with(self.uuid, locked=False)
pop_mock.return_value.acquire_lock.assert_called_once_with(
blocking=False
)
reapply_mock.assert_called_once_with(pop_mock.return_value)
@prepare_mocks
def test_locking_failed(self, pop_mock, reapply_mock):
pop_mock.return_value.acquire_lock.return_value = False
self.assertRaisesRegex(utils.Error,
'Node locked, please, try again later',
process.reapply, self.uuid)
pop_mock.assert_called_once_with(self.uuid, locked=False)
pop_mock.return_value.acquire_lock.assert_called_once_with(
blocking=False
)
@mock.patch.object(example_plugin.ExampleProcessingHook, 'before_update')
@mock.patch.object(process.rules, 'apply', autospec=True)
@mock.patch.object(process.swift, 'SwiftAPI', autospec=True)
@mock.patch.object(node_cache.NodeInfo, 'finished', autospec=True)
@mock.patch.object(node_cache.NodeInfo, 'release_lock', autospec=True)
class TestReapplyNode(BaseTest):
def setUp(self):
super(TestReapplyNode, self).setUp()
CONF.set_override('processing_hooks',
'$processing.default_processing_hooks,example',
'processing')
CONF.set_override('store_data', 'swift', 'processing')
self.data['macs'] = self.macs
self.ports = self.all_ports
self.node_info = node_cache.NodeInfo(uuid=self.uuid,
started_at=self.started_at,
node=self.node)
self.node_info.invalidate_cache = mock.Mock()
self.cli.port.create.side_effect = self.ports
self.cli.node.update.return_value = self.node
self.cli.node.list_ports.return_value = []
self.node_info._state = istate.States.finished
self.commit_fixture = self.useFixture(
fixtures.MockPatchObject(node_cache.NodeInfo, 'commit',
autospec=True))
db.Node(uuid=self.node_info.uuid, state=self.node_info._state,
started_at=self.node_info.started_at,
finished_at=self.node_info.finished_at,
error=self.node_info.error).save(self.session)
def call(self):
process._reapply(self.node_info)
# make sure node_info lock is released after a call
self.node_info.release_lock.assert_called_once_with(self.node_info)
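# prepare_mocks below wires NodeInfo.finished() so that calling it also releases
# the node lock (matching the assertion in call() above), and passes the SwiftAPI
# client mock straight to the test body instead of the raw patcher mocks.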
def prepare_mocks(fn):
@six.wraps(fn)
def wrapper(self, release_mock, finished_mock, swift_mock,
*args, **kw):
finished_mock.side_effect = lambda *a, **kw: \
release_mock(self.node_info)
swift_client_mock = swift_mock.return_value
fn(self, finished_mock, swift_client_mock, *args, **kw)
return wrapper
@prepare_mocks
def test_ok(self, finished_mock, swift_mock, apply_mock, post_hook_mock):
swift_name = 'inspector_data-%s' % self.uuid
swift_mock.get_object.return_value | |
projector.brightness() == new_brightness
# Test the edge cases
brightness = projector.brightness(100)
assert brightness == 100
assert projector.brightness() == 100
brightness = projector.brightness(30)
assert brightness == 30
assert projector.brightness() == 30
# Test out of range values
with pytest.raises(PySproutError) as execinfo:
projector.brightness(29)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.brightness(101)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.brightness(1020)
assert 'Parameter out of range' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.brightness(-2)
assert 'Parameter out of range' in execinfo.value.message
# Test invalid parameters
with pytest.raises(PySproutError) as execinfo:
projector.brightness("abc")
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.brightness({})
assert 'Invalid parameter' in execinfo.value.message
# Set the original value back
brightness = projector.brightness(original_brightness)
assert brightness == original_brightness
assert projector.brightness() == original_brightness
def test_factory_default(get_projector):
"""
Tests the projector's factory_default method.
"""
projector = get_projector
projector_model = check_device_types.get_device_model(projector)
projector.factory_default()
assert projector.brightness() == 100
assert projector.manufacturing_data()['keystone'] == projector.keystone()
assert Projector.State.standby == projector.state()
if projector_model == Devices.projector_g2:
white_point = projector.white_point()
assert white_point['name'] == Projector.Illuminant.d65
d65 = {'name': 'd65', 'value': {'x': 0.31271, 'y': 0.32902}}
assert round(white_point['value']['x'], 5) == d65['value']['x']
assert round(white_point['value']['y'], 5) == d65['value']['y']
assert white_point['name'].value == d65['name']
def test_temperatures(get_projector):
"""
Tests the projector's temperatures method.
"""
projector = get_projector
temperatures = projector.temperatures()
info = projector.info()
check_system_types.check_TemperatureInfoList(temperatures, [info])
def test_solid_color(get_projector):
"""
Tests the projector's solid_color method.
"""
projector = get_projector
projector_model = check_device_types.get_device_model(projector)
if projector_model == Devices.projector_g1:
with pytest.raises(PySproutError) as execinfo:
projector.solid_color()
assert 'Functionality not available.' in str(execinfo.value)
return
with pytest.raises(PySproutError) as execinfo:
# solid_color will fail if the projector isn't on
projector.solid_color()
assert execinfo.value.message == 'Device is in the wrong state'
with pytest.raises(PySproutError) as execinfo:
# solid_color will fail if the projector isn't on
projector.solid_color('green')
assert execinfo.value.message == 'Device is in the wrong state'
turn_proj_on(projector, projector_model)
fw_version = get_proj_fw_version(projector)
color = projector.solid_color()
assert isinstance(color, Projector.SolidColor)
for color in Projector.SolidColor:
if color != Projector.SolidColor.off:
new_color = projector.solid_color(color)
assert new_color == color
assert isinstance(new_color.value, str)
assert color == projector.solid_color()
if check_device_types.firmware_version_at_least(fw_version, 5, 9):
assert Projector.State.solid_color == projector.state()
else:
assert Projector.State.on == projector.state()
new_color = projector.solid_color('off')
assert new_color == Projector.SolidColor.off
assert Projector.SolidColor.off == projector.solid_color()
if check_device_types.firmware_version_at_least(fw_version, 5, 9):
assert Projector.State.on == projector.state()
# Now that we're in the 'on' state, sending a solid_color 'off' should
# raise a wrong state error.
with pytest.raises(PySproutError) as execinfo:
projector.solid_color('off')
assert execinfo.value.message == 'Device is in the wrong state'
color = projector.solid_color('yellow')
assert Projector.SolidColor.yellow == color
assert Projector.SolidColor.yellow == projector.solid_color()
assert Projector.State.solid_color == projector.state()
projector.on()
assert Projector.State.on == projector.state()
assert Projector.SolidColor.off == projector.solid_color()
# When flash times out it should go back to solid color
color = projector.solid_color('blue')
assert Projector.SolidColor.blue == color
assert Projector.SolidColor.blue == projector.solid_color()
assert Projector.State.solid_color == projector.state()
projector.flash(True)
assert Projector.State.flashing == projector.state()
time.sleep(10)
assert Projector.State.solid_color == projector.state()
assert Projector.SolidColor.blue == projector.solid_color()
projector.flash(False)
assert Projector.State.flashing == projector.state()
time.sleep(10)
assert Projector.State.solid_color == projector.state()
assert Projector.SolidColor.blue == projector.solid_color()
projector.flash(True)
assert Projector.State.flashing == projector.state()
# solid_color 'off' fails if we're in the 'flashing' state
with pytest.raises(PySproutError) as execinfo:
projector.solid_color('off')
assert execinfo.value.message == 'Device is in the wrong state'
# flash(True) -> solid_color -> solid_color(off) should put us in
# the 'on' state
color = projector.solid_color('cyan')
assert Projector.SolidColor.cyan == color
assert Projector.SolidColor.cyan == projector.solid_color()
assert Projector.State.solid_color == projector.state()
color = projector.solid_color('off')
assert Projector.SolidColor.off == color
assert Projector.SolidColor.off == projector.solid_color()
assert Projector.State.on == projector.state()
projector.flash(False)
assert Projector.State.flashing == projector.state()
# solid_color 'off' fails if we're in the 'flashing' state
with pytest.raises(PySproutError) as execinfo:
projector.solid_color('off')
assert execinfo.value.message == 'Device is in the wrong state'
# flash(False) -> solid_color -> solid_color(off) should put us in
# the 'on' state
color = projector.solid_color('magenta')
assert Projector.SolidColor.magenta == color
assert Projector.SolidColor.magenta == projector.solid_color()
assert Projector.State.solid_color == projector.state()
color = projector.solid_color('off')
assert Projector.SolidColor.off == color
assert Projector.SolidColor.off == projector.solid_color()
assert Projector.State.on == projector.state()
projector.grayscale()
assert Projector.State.grayscale == projector.state()
# solid_color 'off' fails if we're in the 'grayscale' state
with pytest.raises(PySproutError) as execinfo:
projector.solid_color('off')
assert execinfo.value.message == 'Device is in the wrong state'
# grayscale -> solid_color -> solid_color(off) should put us back in
# the 'grayscale' state
color = projector.solid_color('green')
assert Projector.SolidColor.green == color
assert Projector.SolidColor.green == projector.solid_color()
assert Projector.State.solid_color == projector.state()
color = projector.solid_color('off')
assert Projector.SolidColor.off == color
assert Projector.SolidColor.off == projector.solid_color()
assert Projector.State.grayscale == projector.state()
# Verify invalid values are rejected (by hippy)
with pytest.raises(ValueError) as execinfo:
projector.solid_color('fake')
with pytest.raises(ValueError) as execinfo:
projector.solid_color(3)
with pytest.raises(ValueError) as execinfo:
projector.solid_color({})
with pytest.raises(ValueError) as execinfo:
projector.solid_color('gray')
# Send bad values to SoHal (bypassing the hippy enum check) and make
# sure SoHal throws an error...
with pytest.raises(PySproutError) as execinfo:
projector._send_msg('solid_color', 'fake') # pylint: disable=protected-access
with pytest.raises(PySproutError) as execinfo:
projector._send_msg('solid_color', 2) # pylint: disable=protected-access
with pytest.raises(PySproutError) as execinfo:
projector._send_msg('solid_color', {}) # pylint: disable=protected-access
def test_led_times(get_projector):
"""
Tests the projector's led_times method.
"""
projector = get_projector
projector_model = check_device_types.get_device_model(projector)
if projector_model == Devices.projector_g1:
with pytest.raises(PySproutError) as execinfo:
projector.led_times()
assert 'Functionality not available.' in str(execinfo.value)
return
times = projector.led_times()
assert isinstance(times, dict)
assert isinstance(times['grayscale'], float)
assert isinstance(times['on'], float)
assert isinstance(times['flash'], float)
assert times['grayscale'] > 0
assert times['on'] > 0
assert times['flash'] > 0
def test_structured_light_mode(get_projector):
"""
Tests the projector's structured_light_mode method.
"""
projector = get_projector
projector_model = check_device_types.get_device_model(projector)
if projector_model == Devices.projector_g1:
with pytest.raises(PySproutError) as execinfo:
projector.led_times()
assert 'Functionality not available.' in str(execinfo.value)
return
with pytest.raises(PySproutError) as execinfo:
projector.structured_light_mode()
assert execinfo.value.message == 'Device is in the wrong state'
turn_proj_on(projector, projector_model)
slm = projector.structured_light_mode()
assert isinstance(slm, bool)
assert slm is False
slm = projector.structured_light_mode(True)
assert slm is True
assert projector.structured_light_mode() is True
slm = projector.structured_light_mode(False)
assert slm is False
assert projector.structured_light_mode() is False
# Turn on structured_light_mode, then turn the projector off and then
# on again. When it comes on, structured_light_mode should always be false,
# regardless of what it was previously.
slm = projector.structured_light_mode(True)
assert slm is True
assert projector.structured_light_mode() is True
projector.off()
assert Projector.State.standby == projector.state()
projector.on()
assert projector.state() in (Projector.State.on,
Projector.State.on_no_source)
assert projector.structured_light_mode() is False
# Verify that non-boolean values raise errors
with pytest.raises(PySproutError) as execinfo:
projector.structured_light_mode(0)
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.structured_light_mode('fake')
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.structured_light_mode({})
assert 'Invalid parameter' in execinfo.value.message
def test_white_point(get_projector):
"""
Tests the projector's white_point method.
"""
projector = get_projector
projector_model = check_device_types.get_device_model(projector)
if projector_model == Devices.projector_g1:
with pytest.raises(PySproutError) as execinfo:
projector.led_times()
assert 'Functionality not available.' in str(execinfo.value)
return
orig_wp = projector.white_point()
assert isinstance(orig_wp, dict)
assert isinstance(orig_wp['name'], Projector.Illuminant)
assert isinstance(orig_wp['value'], dict)
assert isinstance(orig_wp['value']['x'], float)
assert isinstance(orig_wp['value']['y'], float)
# Test passing in just the name
for name in Projector.Illuminant:
new_wp = projector.white_point({'name': name.value})
assert new_wp['name'] == name
new_wp = projector.white_point()
assert new_wp['name'] == name
d50 = {'name': 'd50', 'value': {'x': 0.34567, 'y': 0.35850}}
d65 = {'name': 'd65', 'value': {'x': 0.31271, 'y': 0.32902}}
d75 = {'name': 'd75', 'value': {'x': 0.29902, 'y': 0.31485}}
# This one is d55
custom = {'name': 'custom', 'value': {'x': 0.33242, 'y': 0.34743}}
# Test passing in the whole dictionary
for item in [d50, d65, d75, custom]:
new_wp = projector.white_point(item)
assert round(new_wp['value']['x'], 5) == item['value']['x']
assert round(new_wp['value']['y'], 5) == item['value']['y']
assert new_wp['name'].value == item['name']
new_wp = projector.white_point()
assert round(new_wp['value']['x'], 5) == item['value']['x']
assert round(new_wp['value']['y'], 5) == item['value']['y']
assert new_wp['name'].value == item['name']
# Verify parameters that are out of range raise errors
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 0.4, 'y' : 0.34743}})
assert 'Parameter out of range' in str(execinfo.value)
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 0.2, 'y' : 0.34743}})
assert 'Parameter out of range' in str(execinfo.value)
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 0.33242, 'y' : 0.4}})
assert 'Parameter out of range' in str(execinfo.value)
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 0.33242, 'y' : 0.2}})
assert 'Parameter out of range' in str(execinfo.value)
# Verify that invalid parameters raise errors
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 'bad', 'y' : 0.34743}})
assert 'Invalid parameter' in execinfo.value.message
with pytest.raises(PySproutError) as execinfo:
projector.white_point({'name' : 'custom',
'value' : {'x' : 0.33242, | |
# Repository: Kyubravo/qassim-university-cs213-project
'''
This Project is by:
Name: <NAME>
ID: 391107963
Name: <NAME>
ID: 391108011
'''
import re
import csv
import matplotlib.pyplot as plt # For graphs
import tkinter as tk # for GUI
""" import pandas as pd
import matplotlib.pyplot as plt
import numpy as np """
class Book:
ListOfBooks = []
__indexList = []
# using regex
""" # Must be used first to set the catagory Index csv file
@classmethod
def openCatagoryIndexFile(cls, csvFilePath):
indexFile = open(csvFilePath)
indexReturn = indexFile.read()
catagoryRegex = re.compile(r'(\d), (\w*)')
cls.__indexList = catagoryRegex.findall(indexReturn)
indexFile.close() """
# using csv library
# Must be called first to load the category index csv file
@classmethod
def CatagoryIndexFile(cls, csvFilePath):
csvIndexFile = open(csvFilePath)
indexReader = csv.DictReader(csvIndexFile) # read the csv file; each row becomes a dictionary keyed by the column headers
for row in indexReader:
cls.__indexList.append((int(row['index']),row['catagory']))
csvIndexFile.close()
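# A minimal sketch of the category-index csv this method expects (the column
# names match the keys read above; the example rows are an illustration only):
#
#   index,catagory
#   1,Fantasy
#   2,History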
@classmethod
def importFromCSVfile(cls,csvFilePath):
csvIndexFile = open(csvFilePath)
indexReader = csv.DictReader(csvIndexFile) # read the csv file; each row becomes a dictionary keyed by the column headers
for row in indexReader:
Book(row['index'],row['title'],row['numberOfPages'],row['catagoryIndex'])
csvIndexFile.close()
return cls.ListOfBooks # return a list of Book objects
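# Likewise, a sketch of the books csv consumed above (column names taken from the
# keys used in the loop; the example values are illustrative only):
#
#   index,title,numberOfPages,catagoryIndex
#   1,Some Book,320,2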
@classmethod
def getBookByIndex(cls,bookIndex):
for book in Book.ListOfBooks:
if str(book.getIndex()) == str(bookIndex): # compare as strings so integer and csv (string) indices both match
return book
@classmethod
def getBookByTitle(cls,bookTitle):
for book in Book.ListOfBooks:
if book.getTitle() == bookTitle:
return book
@classmethod
def getCatagoryByIndex(cls,catagoryIndex):
for catagoryListing in Book.__indexList:
if catagoryListing[0] == catagoryIndex:
return catagoryListing[1]
def __init__(self, bookIndex, title, pageNum, catagoryIndex):
self.__bookIndex = bookIndex
self.__title = title
self.__pageNum = pageNum
self.__catagoryIndex = catagoryIndex
Book.ListOfBooks.append(self) # add to the list to look it up if needed using getBookByIndex()
def getIndex(self):
return self.__bookIndex
def getTitle(self):
return self.__title
def getpageNum(self):
return int(self.__pageNum)
def getCatagoryIndex(self):
return int(self.__catagoryIndex)
def getCatagoryName(self):
for i in Book.__indexList:
if int(i[0]) == int(self.__catagoryIndex): # compare the category index (stored as a string when loaded from csv)
return i[1] # if it matches, return the category name
class Member:
listOfMembers = [] # list of all members, static
@classmethod
def importFromCSVfile(cls,csvFilePath):
csvIndexFile = open(csvFilePath)
indexReader = csv.DictReader(csvIndexFile) # read the csv file; each row becomes a dictionary keyed by the column headers
readBooksIndexRegex = re.compile(r'(\d+)') # this takes separated numbers in any format and turns them into a list.
memberObjectList = []
for row in indexReader:
readBooksIndexList = readBooksIndexRegex.findall(row['readBooksIndex'])
member = Member(row['index'],row['name'],row['phoneNumber'],row['email'])
for bookIndex in readBooksIndexList:
member.addReadBook(Book.getBookByIndex(bookIndex)) # add all the read books to the member's readbooks list
memberObjectList.append(member)
csvIndexFile.close()
return memberObjectList # return a list of all imported members
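# Sketch of the members csv consumed above (illustrative values only). The
# readBooksIndex column may separate the book indices with any non-digit
# characters, since the regex above only extracts runs of digits:
#
#   index,name,phoneNumber,email,readBooksIndex
#   1,Someone,0500000000,someone@example.com,"1 2 3"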
@classmethod
def getMemberByIndex(cls,memberIndex):
for member in Member.listOfMembers:
if member.getIndex() == memberIndex:
return member
def __init__(self,index, name, phoneNumber, email):
self.__index = index
self.__name = name
self.__phoneNumber = phoneNumber
self.__email = email
self.__numberOfReadPages = 0 # the pages the member read
self.readBooks = [] # keep track of the books this member has read
Member.listOfMembers.append(self) # add the instance of the Member class to listOfMembers which is static
def getIndex(self):
return int(self.__index)
def getName(self):
return self.__name
def getPhoneNumber(self):
return self.__phoneNumber
def getEmail(self):
return self.__email
def getNumberOfReadPages(self):
return int(self.__numberOfReadPages)
def addReadBook(self,bookClassList): # NOTE: the format addReadBook(book1,book2,...) is not supported
if(type(bookClassList) == list): # if it's a list in format addReadBook([book1,book2,...])
for book in bookClassList:
self.readBooks.append(book) # add the book to the list of the member
self.__numberOfReadPages += book.getpageNum() # count read pages for the member
elif(type(bookClassList) == Book): # if it's a Book object in format addReadBook(book1)
self.readBooks.append(bookClassList) # add the book to the list of the member
self.__numberOfReadPages += bookClassList.getpageNum() # count read pages for the member
elif(type(bookClassList) == int): # if it's an index of a book in format addReadbook(1)
book = Book.getBookByIndex(bookClassList) # get the book object using the index
self.readBooks.append(book) # add the book to the list of the member
self.__numberOfReadPages += book.getpageNum() # count read pages for the member
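# Usage sketch for addReadBook (hypothetical objects, for illustration only):
#   member.addReadBook(book)            # a single Book object
#   member.addReadBook([book1, book2])  # a list of Book objects
#   member.addReadBook(1)               # a book index, looked up via Book.getBookByIndex()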
def catagoryRanking(): # rank categories by how often they were read; returns [(category_index, count), ...] sorted by count
rawCatagoryList = [] # every read book's category index, with repeats
for member in Member.listOfMembers: # collect the category index of every book each member has read
for book in member.readBooks:
rawCatagoryList.append(book.getCatagoryIndex())
IndexList = [] # unique category indices (no repeats)
for i in rawCatagoryList:
if IndexList.count(i) == 0 :
IndexList.append(i)
catCount = [] # read count for each category, aligned with IndexList
for i in IndexList: # count how many times each category has been read
catCount.append(rawCatagoryList.count(i))
sortedCat = [] # final list that will be returned
for i in range(len(IndexList)): # pair each category index with its count: [(category_index, count), ...]
sortedCat.append((IndexList[i],catCount[i]))
sortedCat.sort(reverse=True, key=lambda item: item[1]) # sort by the count (second element of each tuple), highest first
return sortedCat
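# Worked example: if the members' read books have category indices [1, 1, 2],
# catagoryRanking() returns [(1, 2), (2, 1)] -- category 1 read twice, category 2 once.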
def memberRankingBooks():
rawRankList= []
for member in Member.listOfMembers:
rawRankList.append((member.getName(), len(member.readBooks))) # record (member name, number of books read)
rawRankList.sort(reverse=True, key=lambda item: item[1]) # sort by the number of books read, highest first
return rawRankList
def memberRankingPages():
rawRankList= []
for member in Member.listOfMembers:
rawRankList.append((member.getName(), member.getNumberOfReadPages())) # record (member name, number of pages read)
rawRankList.sort(reverse=True, key=lambda item: item[1]) # sort by the number of pages read, highest first
return rawRankList
def pltCatRank():
catRankRaw, catPlace, catCount = [], [], []
catRankRaw = catagoryRanking() # [(category_index, count), ...] sorted by count
catOrder = []
i = 0
while(i < len(catRankRaw)): # Splitting the values
catPlace.append(catRankRaw[i][0])
catCount.append(catRankRaw[i][1])
i+= 1
for i in catPlace: # convert each category index to its category name
catOrder.append((Book.getCatagoryByIndex(i)))
# make the plot
plt.bar(catOrder,catCount) # (x, y)
plt.xticks(rotation=30,size=8)
plt.xlabel('Catagory Name')
plt.ylabel('Number of read catagories')
plt.show()
def pltMemberRankBook(): # plot the member ranking by the number of books read
memberRankRaw, memberPlace, memberCount = [], [], []
memberRankRaw = memberRankingBooks()
i = 0
while(i < len(memberRankRaw)): # Splitting the values
memberPlace.append(memberRankRaw[i][0])
memberCount.append(memberRankRaw[i][1])
i += 1
plt.bar(memberPlace,memberCount) # (x, y)
plt.xticks(rotation=30,size=8)
plt.xlabel('member Name')
plt.ylabel('Number of read books')
plt.show()
def pltMemberRankPages():
memberRankRaw, memberPlace, memberCount = [], [], []
memberRankRaw = memberRankingPages()
i = 0
while(i < len(memberRankRaw)): # Splitting the values
memberPlace.append(memberRankRaw[i][0])
memberCount.append(memberRankRaw[i][1])
i += 1
plt.bar(memberPlace,memberCount) # (x, y)
plt.xticks(rotation=30,size=8)
plt.xlabel('Member name')
plt.ylabel('Number of read pages')
plt.show()
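# Typical call order for this module (file names are assumptions, for illustration):
#   Book.CatagoryIndexFile('catagories.csv')   # must run first to load the category index
#   Book.importFromCSVfile('books.csv')
#   Member.importFromCSVfile('members.csv')
#   pltCatRank(); pltMemberRankBook(); pltMemberRankPages()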
# ============ GUI ============#
def openBooksWindow(books):
window = tk.Toplevel()
window.title('Books')
mainFrame = tk.Frame(window,width=400,height=300)
mainFrame.pack(fill='both',padx=10, pady=10)
leftFrame = tk.Frame(mainFrame,width=200)
leftFrame.pack(side='left',fill='y',padx=10, pady=10)
rightFrame = tk.Frame(mainFrame,width=200)
rightFrame.pack(side='right',fill='y',padx=10, pady=10)
# left frame
lb = tk.Listbox(leftFrame, selectmode='browse')
for book in books:
lb.insert(book.getIndex(), book.getTitle())
lb.pack(fill='both')
lb.select_set(0)
#right frame
lbSelectedIndex = lb.curselection()[0]
label = tk.Label(rightFrame,text='Book index: {}\nBook title: {}\nPages: {}\n'.format(lbSelectedIndex, \
books[lbSelectedIndex].getTitle(), \
books[lbSelectedIndex].getpageNum() \
), \
justify='left',anchor='nw'
)
label.pack(fill='both')
def showInfo(): # refresh the label with the currently selected book; the label is not bound to listbox selection changes, so the 'Show Book Info' button triggers the update on demand
lbSelectedIndex = lb.curselection()[0]
label.config(text='Book index: {}\nBook title: {}\nPages: {}\n'.format(lbSelectedIndex, \
books[lbSelectedIndex].getTitle(), \
books[lbSelectedIndex].getpageNum() \
)
)
showButton = tk.Button(rightFrame,text='Show Book Info',command=lambda: showInfo())
showButton.pack(side='bottom')
window.mainloop()
def openMembersWindow(members,books):
window = tk.Toplevel()
window.title('Members')
mainFrame = tk.Frame(window)
mainFrame.pack(fill='both',padx=10, pady=10)
leftFrame = tk.Frame(mainFrame)
leftFrame.grid(row=0,column=0,padx=10, pady=10,)
midFrame = tk.Frame(mainFrame)
midFrame.grid(row=0,column=1,padx=10, pady=10)
rightFrame = tk.Frame(mainFrame)
rightFrame.grid(row=0,column=2,padx=10, pady=10)
# left frame
leftlabel = tk.Label(leftFrame,text='Members')
leftlabel.pack(side = 'top',anchor='n')
leftlb = tk.Listbox(leftFrame)
for member in members:
leftlb.insert(member.getIndex(), member.getName())
leftlb.pack(fill='both',anchor='n')
leftlb.select_set(0)
# middle frame
midlabel = tk.Label(midFrame,text='Read books by the member')
midlabel.pack(side = 'top')
midlb = tk.Listbox(midFrame)
for book in books:
midlb.insert(book.getIndex(), book.getTitle())
midlb.pack(fill='both',side='top',expand=1)
midlb.select_set(0)
#right frame
member = Member.getMemberByIndex(midlb.curselection()[0])
label = tk.Label(rightFrame,text='Member index: {}\nName: {}\nPhone: {}\nEmail: {}'.format(member.getIndex(), \
member.getName(), \
member.getPhoneNumber(), \
member.getEmail() \
), \
justify='left',anchor='nw'
)
label.pack(fill='both')
# mainframe again
def showMemberInfo(memberIndex): # refresh the label with the given member's details; the label is not bound to listbox selection changes, so a button triggers the update on demand
member = Member.getMemberByIndex(memberIndex)
| |
# type: ignore
from typing import Callable, Optional, Dict, Any, List
from radixlib.parsers.base_parser import ParserBase
import radixlib as radix
import dateparser
class DefaultParser(ParserBase):
""" A default parser used to parse the responses of the gateway API into a format that is easy
to query
"""
@classmethod
def parse(
cls,
data: Any,
data_type: str
) -> Any:
""" Routes the parsing of the data to the appropriate parsing function from within the class
This function acts as a router which tries to find the appropriate parsing function within
the class to parse the data. If no parser is implemented for this data type, then the
original data is returned without any parsing.
Args:
data (Any): Data of any type to pass to the parser function
data_type (str): Type of the data or the origin of the data
Returns:
Any: The parsed data
"""
# Getting the parsing function for this data type from the attributes of the class
function_name: str = f'parse_{data_type}'
parsing_function: Optional[Callable[..., Any]] = getattr(cls, function_name, None)
# We try calling the parsing function with the data that we have. If the parsing function
# works, then we return the parsed data. However, if a TypeError or NotImplementedError is
# raised, then we return the original data
try:
parsed_data: Any = parsing_function(data) # type: ignore
return parsed_data if parsed_data is not None else data
except (TypeError, NotImplementedError):
return data
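# Dispatch sketch: parse(data, "get_token_info") looks up "parse_get_token_info"
# on the class and calls it; an unknown data_type (or a parser that raises
# TypeError/NotImplementedError) simply returns the data unchanged.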
@classmethod
def parse_get_gateway_info(cls, data: Dict[str, Any]) -> Any:
""" A function used for the parsing of the get_gateway_info API calls.
This parser function produces output in the following format::
{
"network_identifier": {
"network": "mainnet"
},
"gateway_api": {
"version": "1.0.1",
"open_api_schema_version": "1.0.3"
},
"ledger_state": {
"version": 78345123,
"timestamp": "2022-02-03T15:24:35.866Z",
"epoch": 7024,
"round": 8910
},
"target_ledger_state": {
"version": 78345127
}
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
# No parsing is needed in this case; the format the data is already given in is
# easy to query.
raise NotImplementedError("No implementation for the parse_get_gateway_info")
@classmethod
def parse_derive_account_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the derive_account_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the derived account address.
"""
return data['account_identifier']['address']
@classmethod
def parse_get_account_balances(cls, data: Dict[Any, Any]) -> Dict[str, Dict[str, int]]:
""" A function used for the parsing of the get_account_balances API calls.
This parser function produces output in the following format::
{
"total_balance": {
"token_rri": "balance of token"
},
"staking_balance": {
"token_rri": "balance of token"
},
"liquid_balance": {
"token_rri": "balance of token"
}
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
# Processing the balances into an easy to query dictionary format
final_balances: Dict[str, Dict[str, int]] = {
"total_balance": {},
"staking_balance": {},
"liquid_balance": {},
}
final_balances['staking_balance'][data['account_balances']['staked_and_unstaking_balance']['token_identifier']['rri']] = int(data['account_balances']['staked_and_unstaking_balance']['value'])
for token_balance in data['account_balances']['liquid_balances']:
final_balances['liquid_balance'][token_balance['token_identifier']['rri']] = int(token_balance['value'])
unique_rris: List[str] = list(set(list(final_balances['staking_balance'].keys()) + list(final_balances['liquid_balance'].keys())))
for rri in unique_rris:
balance1: Optional[int] = final_balances['staking_balance'].get(rri)
balance2: Optional[int] = final_balances['liquid_balance'].get(rri)
final_balances['total_balance'][rri] = (0 if balance1 is None else balance1) + (0 if balance2 is None else balance2)
return final_balances
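# Example of the parsed shape (illustrative rri key and amounts): with 50 XRD
# staked and 100 XRD liquid, the result is
#   {"total_balance": {"xrd_rri": 150},
#    "staking_balance": {"xrd_rri": 50},
#    "liquid_balance": {"xrd_rri": 100}}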
@classmethod
def parse_get_stake_positions(cls, data: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
""" A function used for the parsing of the get_stake_positions API calls.
This parser function produces output in the following format::
{
"pending_stakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
}
],
"stakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
}
]
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
key: list(map(lambda x: dict([
('validator_address', x['validator_identifier']['address']),
('amount', {
x['delegated_stake']['token_identifier']['rri']: int(x['delegated_stake']['value'])
})
]), value))
for key, value
in data.items()
if key in ['pending_stakes', 'stakes']
}
@classmethod
def parse_get_unstake_positions(cls, data: Dict[str, Any]) -> Dict[str, List[Dict[str, Any]]]:
""" A function used for the parsing of the get_unstake_positions API calls.
This parser function produces output in the following format::
{
"pending_unstakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
"epochs_until_unlocked": "amount"
}
],
"unstakes": [
{
"validator_address": "address",
"amount": {
"xrd_rri": "amount"
},
"epochs_until_unlocked": "amount"
}
]
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
key: list(map(lambda x: dict([
('validator_address', x['validator_identifier']['address']),
('amount', {
x['delegated_stake']['token_identifier']['rri']: int(x['delegated_stake']['value'])
}),
('epochs_until_unlocked', x['epochs_until_unlocked']),
]), value))
for key, value
in data.items()
if key in ['pending_unstakes', 'unstakes']
}
@classmethod
def parse_get_account_transactions(cls, data: Dict[str, Any]) -> List[Dict[str, Any]]:
""" A function used for the parsing of the get_account_transactions API calls.
This parser function produces output in the following format::
[
{
"hash": data['transaction']['transaction_identifier']['hash'],
"status": data['transaction']['transaction_status']['status'],
"confirmed_time": dateparser.parse(data['transaction']['transaction_status']['confirmed_time']),
"actions": list(map(
lambda x: getattr(radix.actions, x['type']).from_dict(x),
data['transaction']['actions']
)),
"fee_paid": {
data['transaction']['fee_paid']['token_identifier']['rri']: int(data['transaction']['fee_paid']['value'])
},
"transaction_blob": data['transaction']['metadata']['hex'],
"message_blob": data['transaction']['metadata'].get('message'),
}
]
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return list(map(
lambda x: cls.parse({'transaction': x}, 'transaction_status'),
data['transactions']
))
@classmethod
def parse_get_native_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_native_token_info API calls.
This parser function produces output in the following format::
{
"rri": "token_rri",
"total_supply": "amount"
"total_minted": "amount"
"total_burned": "amount"
"name": "token_name"
"description": "token_description",
"icon_url": "token_icon_url",
"url": "token_url",
"symbol": "token_symbol",
"is_supply_mutable": "token_is_supply_mutable",
"granularity": "token_granularity",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return cls.parse(data, 'get_token_info')
@classmethod
def parse_get_token_info(cls, data: Any) -> Any:
""" A function used for the parsing of the get_token_info API calls.
This parser function produces output in the following format::
{
"rri": "token_rri",
"total_supply": "amount"
"total_minted": "amount"
"total_burned": "amount"
"name": "token_name"
"description": "token_description",
"icon_url": "token_icon_url",
"url": "token_url",
"symbol": "token_symbol",
"is_supply_mutable": "token_is_supply_mutable",
"granularity": "token_granularity",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"rri": data['token']['token_identifier']['rri'],
"total_supply": int(data['token']['token_supply']['value']),
"total_minted": int(data['token']['info']['total_minted']['value']),
"total_burned": int(data['token']['info']['total_burned']['value']),
"name": data['token']['token_properties']['name'],
"description": data['token']['token_properties']['description'],
"icon_url": data['token']['token_properties']['icon_url'],
"url": data['token']['token_properties']['url'],
"symbol": data['token']['token_properties']['symbol'],
"is_supply_mutable": bool(data['token']['token_properties']['is_supply_mutable']),
"granularity": int(data['token']['token_properties']['granularity']),
}
@classmethod
def parse_derive_token_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the derive_token_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the token RRI
"""
return data['token_identifier']['rri']
@classmethod
def parse_get_validator(cls, data: Dict[str, Any]) -> Dict[str, Any]:
""" A function used for the parsing of the get_validator API calls.
This parser function produces output in the following format::
{
"validator_address": "address",
"stake": {
"xrd_rri": "amount"
},
"owner_stake": {
"xrd_rri": "amount"
},
"uptime": {
"epoch_range": {
"from": "from_epoch",
"to": "to_epoch"
},
"uptime_percentage": "uptime_percentage",
"proposals_missed": "proposals_missed",
"proposals_completed": "proposals_completed"
},
"url": "url",
"validator_fee_percentage": "validator_fee_percentage",
"name": "name",
"registered": "registered",
"owner_account_address": "owner_account_address",
"external_stake_accepted": "external_stake_accepted",
}
Args:
data (dict): A dictionary of the data to parse.
Returns:
dict: A dictionary of the parsed data.
"""
return {
"validator_address": data['validator']['validator_identifier']['address'],
"stake": {
data['validator']['stake']['token_identifier']['rri']: int(data['validator']['stake']['value'])
},
"owner_stake": {
data['validator']['info']['owner_stake']['token_identifier']['rri']: int(data['validator']['info']['owner_stake']['value'])
},
"uptime": data['validator']['info']['uptime'],
"url": data['validator']['properties']['url'],
"validator_fee_percentage": data['validator']['properties']['validator_fee_percentage'],
"name": data['validator']['properties']['name'],
"registered": bool(data['validator']['properties']['registered']),
"owner_account_address": data['validator']['properties']['owner_account_identifier']['address'],
"external_stake_accepted": data['validator']['properties']['external_stake_accepted'],
}
@classmethod
def parse_get_validator_identifier(cls, data: Dict[str, Dict[str, str]]) -> str:
""" A function used for the parsing of the get_validator_identifier API calls.
Args:
data (dict): A dictionary of the data to parse.
Returns:
str: A string of the validator address
"""
return data['validator_identifier']['address']
@classmethod
def parse_get_validators(cls, data: Dict[str, Any]) -> List[Dict[str, Any]]:
""" A function used for the parsing of the get_validators API calls.
This parser function produces output in the following format::
[
{
"validator_address": "address",
"stake": {
"xrd_rri": "amount"
},
"owner_stake": {
"xrd_rri": "amount"
},
"uptime": {
"epoch_range": {
"from": "from_epoch",
| |
u"light nucleus: production",
u"neutralino",
u"gluino: mass",
u"parton",
u"Higgs particle: leptonic decay",
u"electron: beam",
u"polarized target",
u"potential: static",
u"glueball",
u"symmetry: O(N)",
u"eta(958)",
u"tracking detector",
u"J-PARC Lab",
u"quark: flavor: 3",
u"parton: energy loss",
u"electroweak interaction: correction",
u"Hanbury-Brown-Twiss effect",
u"momentum dependence",
u"dependence: mass number",
u"knot theory",
u"tau: semileptonic decay",
u"WIMP: dark matter",
u"WIMP: mass",
u"Higgs mechanism",
u"crystal",
u"mu-problem",
u"photon: associated production",
u"temperature: 0",
u"Lipatov equation",
u"B-factory",
u"time resolution",
u"field theory: Euclidean",
u"minisuperspace",
u"evolution equation",
u"photon: yield",
u"inflaton",
u"new physics: search for",
u"pair production",
u"diffeomorphism: invariance",
u"ABJM model",
u"matter: hadronic",
u"WIMP",
u"Cherenkov counter: imaging",
u"electron: cosmic radiation",
u"cavity",
u"anomaly: conformal",
u"BFKL equation",
u"gravitation: potential",
u"Z': mass",
u"D0: hadronic decay",
u"jet: bottom",
u"electromagnetic",
u"hidden sector",
u"fermion: Majorana",
u"total cross section: energy dependence",
u"gap",
u"algebra: conformal",
u"transverse momentum: missing-energy",
u"collective phenomena",
u"mechanics: classical",
u"error",
u"quark: polarization",
u"Born approximation",
u"gas",
u"computer",
u"mass: topological",
u"R symmetry",
u"inflaton: potential",
u"homology",
u"B+: branching ratio",
u"<NAME>",
u"experimental methods",
u"flux: magnetic",
u"equivalence principle",
u"jet: transverse momentum",
u"supercharge",
u"correction: nonperturbative",
u"sphere: fuzzy",
u"spin: 2",
u"weak interaction",
u"matter: coupling",
u"massless",
u"inflation: hybrid",
u"synchrotron radiation",
u"Weinberg angle",
u"particle identification",
u"leading logarithm approximation: higher-order",
u"mirror",
u"structure function",
u"group theory",
u"rapidity: gap",
u"isospin: violation",
u"gamma ray: cosmic radiation",
u"algebra: deformation",
u"meson: hadron spectroscopy",
u"iron",
u"fermion: pair production",
u"deep underground detector",
u"quark: Wilson",
u"p p: elastic scattering",
u"gluon: radiation",
u"field theory: classical",
u"fixed point: infrared",
u"Compton scattering: off-shell",
u"B: decay",
u"final state: two-particle",
u"differential equations: solution",
u"cryogenics",
u"asymptotic freedom",
u"HESS",
u"conductivity",
u"black hole: formation",
u"gauge field theory: abelian",
u"noncommutative",
u"scaling: violation",
u"electron p: colliding beams",
u"J/psi(3100): leptonic decay",
u"kinetic",
u"phase shift",
u"field theory: anti-de Sitter",
u"compactification: flux",
u"cluster",
u"gauge field theory: supersymmetry",
u"Z'",
u"charge: magnetic",
u"mass spectrum: dilepton",
u"neutrino: interaction",
u"quark: chiral",
u"D0 anti-D0: mixing",
u"master equation",
u"quark: mass dependence",
u"scale: electroweak interaction",
u"p: form factor",
u"Higgs particle: radiative decay",
u"gravitino",
u"quark: constituent",
u"regularization: zeta function",
u"enhancement",
u"hadron: production",
u"particle: massive",
u"quark antiquark: potential",
u"expansion: derivative",
u"fermion: domain wall",
u"hyperfine structure",
u"lepton: mixing angle",
u"interpretation of experiments: DESY HERA Stor",
u"hidden symmetry",
u"BF model",
u"defect",
u"gluon: fusion",
u"Newport News CEBAF Linac",
u"photon: polarized beam",
u"index theorem",
u"pi: photoproduction",
u"positron p: colliding beams",
u"electron",
u"electromagnetic interaction",
u"mean field approximation: relativistic",
u"density: high",
u"neutrino: pair production",
u"baryon: octet",
u"field theory: noncommutative",
u"charge",
u"neutrino: heavy",
u"scattering amplitude: higher-order",
u"charged particle: multiple production",
u"B/s0: hadronic decay",
u"mass generation",
u"operator: differential",
u"quark antiquark: annihilation",
u"satellite",
u"approximation: eikonal",
u"spin: network",
u"scaling: dimension",
u"PYTHIA",
u"Grid computing",
u"gravitino: mass",
u"three-body problem",
u"ghost: propagator",
u"baryon: mass",
u"inflation: model",
u"black hole: evaporation",
u"magnetic moment",
u"gauge boson: mass",
u"landscape",
u"mass: top",
u"algebra: Kac-Moody",
u"validity test",
u"group: Lie",
u"current: vector",
u"quasinormal mode",
u"energy: threshold",
u"quark: staggered",
u"differential cross section: measured",
u"gauge field theory: SU(2) x U(1)",
u"Auger",
u"graphene",
u"pi0: hadroproduction",
u"form factor: electric",
u"quark: mixing angle",
u"electroweak interaction: model",
u"interferometer",
u"gauge field theory: Yang-Mills: supersymmetry",
u"Bianchi identity",
u"quasar",
u"correlation: two-particle",
u"black hole: production",
u"Batavia TEVATRON PS",
u"scaling: finite size",
u"pi0: radiative decay",
u"correlation: length",
u"f0(600)",
u"invariance: modular",
u"electron p: exclusive reaction",
u"S-duality",
u"lepton: asymmetry",
u"cosmic radiation: anisotropy",
u"SU(N)",
u"field strength",
u"susceptibility",
u"dispersion",
u"dark matter: interaction",
u"anomaly: chiral",
u"violation: CPT",
u"strong interaction: CP",
u"scalar meson",
u"field theory: massless",
u"Higgs model: abelian",
u"effect: quantum",
u"bootstrap",
u"field theory: spinor",
u"space: S(5)",
u"space: de Sitter",
u"bounce",
u"star",
u"electroweak interaction: standard model",
u"potential: confinement",
u"symmetry: Lorentz",
u"reheating",
u"Higgs particle: neutral particle",
u"cosmological constant: negative",
u"f0(980)",
u"tensor: Weyl",
u"mass: Majorana",
u"temperature: reheating",
u"Poisson bracket",
u"quaternion",
u"spin: 1",
u"n: electric moment",
u"approximation",
u"central region",
u"detector: design",
u"neural network",
u"jet: production",
u"Wess-Zumino model",
u"space-time: Kerr",
u"potential: optical",
u"mass difference",
u"multiplet",
u"dark matter: decay",
u"supersymmetry: multiplet",
u"effect: finite size",
u"quark: flavor: 2",
u"soft collinear effective theory",
u"costs",
u"multiplet: vector",
u"pi: electroproduction",
u"spin dependence",
u"solar system",
u"space-time: dimension",
u"nucleon: spin",
u"moduli: stability",
u"standard model: supersymmetry",
u"optical",
u"fermion: massless",
u"angular resolution",
u"HERMES",
u"matter: strangeness",
u"geophysics",
u"structure function: spin",
u"background: stochastic",
u"baryon number: violation",
u"mass: texture",
u"WIMP: annihilation",
u"OPERA",
u"force: Casimir",
u"form factor: magnetic",
u"current: correlation function",
u"kink",
u"quantum chromodynamics: vacuum state",
u"pseudoscalar meson: pair production",
u"Compton scattering: inverse",
u"rho(770)0",
u"nucleon resonance",
u"differential cross section: transverse momentum",
u"p p: exclusive reaction",
u"toy model",
u"sparticle: hadroproduction",
u"initial state",
u"fermion: mixing angle",
u"coupling: scalar",
u"atom",
u"dark energy: holography",
u"B/s0 anti-B/s0: mixing",
u"tadpole",
u"dipole",
u"integrated circuit",
u"current: axial-vector",
u"baryon",
u"neutrino: mass spectrum",
u"quantum group",
u"form factor: electromagnetic",
u"eta",
u"K: pair production",
u"Higgs particle: triplet",
u"modulation",
u"quantum chromodynamics: holography",
u"COMPASS",
u"electroweak interaction: critical phenomena",
u"Yang-Mills: supersymmetry",
u"time",
u"oxygen",
u"hadron hadron: inclusive reaction",
u"beam transport",
u"matter",
u"transformation: Lorentz",
u"particle identification: flavor",
u"neutralino: LSP",
u"meson: mass spectrum",
u"hadron: gas",
u"K: semileptonic decay",
u"functional analysis",
u"curvature: scalar",
u"photon photon: exclusive reaction",
u"dimension: 7",
u"dark energy: equation of state",
u"scattering length",
u"baryon: heavy",
u"time projection chamber",
u"fermion: Wilson",
u"gauge boson",
u"nuclear reaction: model",
u"finite size",
u"field theory: Dirac",
u"integral equations",
u"distribution function",
u"decay rate",
u"K: hadronic decay",
u"Dvali-Gabadadze-Porrati model",
u"acceptance",
u"K-theory",
u"nucleus: recoil",
u"Juelich COSY PS",
u"cross section: upper limit",
u"big bang",
u"critical phenomena: deconfinement",
u"MINOS",
u"ionization",
u"meson: wave function",
u"p: polarized beam",
u"fermion: zero mode",
u"simplex",
u"Schroedinger equation: solution",
u"MSW effect",
u"nucleon",
u"baryon number",
u"renormalizable",
u"variational",
u"dilepton: final state",
u"Einstein-Maxwell equation: solution",
u"photon: flux",
u"deceleration",
u"gravitation: effect",
u"superfield: chiral",
u"transverse momentum: factorization",
u"radiative correction: higher-order",
u"anomaly: axial",
u"higher-order: 3",
u"star: compact",
u"linear collider: proposed",
u"bubble",
u"light front",
u"model: Glauber",
u"resonance",
u"diquark",
u"fermion: clover",
u"charge: fractional",
u"approximation: weak field",
u"heat kernel",
u"muon: flux",
u"deeply virtual Compton scattering",
u"fermion: determinant",
u"model: oscillator",
u"electron positron: inelastic scattering",
u"channel cross section: branching ratio",
u"energy: ground state",
u"little Higgs model",
u"angular distribution: anisotropy",
u"Langevin equation",
u"K0 anti-K0: mixing",
u"transverse",
u"semiconductor detector",
u"generalized uncertainty principle",
u"radioactivity",
u"vector meson: mass",
u"interpretation of experiments: CERN SPS",
u"XENON",
u"energy: conservation law",
u"space: noncommutative",
u"mass spectrum: transverse",
u"Unruh effect",
u"algebra: Poincare",
u"space-time: geometry",
u"design",
u"electron p: elastic scattering",
u"Thesis",
u"semiconductor detector: microstrip",
u"gravitation: Lovelock",
u"radiation: initial-state interaction",
u"length: minimal",
u"VERITAS",
u"spin: dependence",
u"energy: flux",
u"control system",
u"satellite: Planck",
u"dark energy: density",
u"GEANT",
u"neutrino: magnetic moment",
u"p: beam",
u"Seiberg-Witten map",
u"decay: amplitude analysis",
u"conifold",
u"neutron star: binary",
u"Z0: pair production",
u"cosmic radiation: spectrum",
u"particle flow",
u"p",
u"deep inelastic scattering: semi-inclusive reaction",
u"galaxy: halo",
u"model: chiral",
u"tau: branching ratio",
u"Gribov problem",
u"lepton",
u"VIRGO",
u"slope",
u"external field",
u"multiplicity: spectrum",
u"string model: topological",
u"photon p: inelastic scattering",
u"black hole: stability",
u"superconductivity: holography",
u"trace anomaly",
u"W: associated production",
u"quantum chromodynamics: matter",
u"density dependence",
u"neutrino: decay",
u"<NAME>",
u"silicon",
u"Poincare",
u"Lambda: hypernucleus",
u"quantization: canonical",
u"DAMA",
u"photon: absorption",
u"invariance: CPT",
u"screening",
u"neutrino: nuclear reactor",
u"fermion: overlap",
u"Brookhaven PS",
u"top: single production",
u"BooNE",
u"hard thermal loop approximation",
u"Feynman graph: penguin",
u"de Sitter",
u"operator",
u"quantum gravity: effect",
u"gravitation: induced",
u"algebra: Heisenberg",
u"missing-energy",
u"stochastic",
u"nucleosynthesis",
u"Hall effect",
u"nucleus: cosmic radiation",
u"black brane",
u"muon: storage ring",
u"universal extra dimension",
u"structure function: longitudinal",
u"neutrino nucleon: deep inelastic scattering",
u"gluon: mass",
u"preheating",
u"photon: production",
u"determinant",
u"photon: energy",
u"operator: composite",
u"Hall effect: quantum",
u"photon: electroproduction",
u"p nucleus: scattering",
u"gauge field theory: noncommutative",
u"positron p: deep inelastic scattering",
u"channel cross section: measured",
u"graph theory",
u"tetraquark",
u"shell model",
u"semiconductor detector: germanium",
u"black hole: spin",
u"nucleus: semileptonic decay",
u"scintillation counter: crystal",
u"susceptibility: topological",
u"Euclidean",
u"propagation",
u"Robertson-Walker",
u"Einstein",
u"bottom: radiative decay",
u"prepotential",
u"OPAL",
u"dependence: impact parameter",
u"Aharonov-Bohm effect",
u"quantization: constraint",
u"Ward-Takahashi identity",
u"network",
u"momentum transfer: high",
u"compactification: Calabi-Yau",
u"Higgs particle: composite",
u"multiverse",
u"fluctuation: vacuum",
u"field theory: finite temperature",
u"differential cross section: ratio",
u"Theta(1540)",
u"magnetic field: galaxy",
u"entropy: production",
u"gluon: density",
u"lepton: radiative decay",
u"Lambda",
u"collapse",
u"warped",
u"littlest Higgs model",
u"approximation: linear",
u"Ginsparg-Wilson relation",
u"neutrino: massive",
u"imaging",
u"abelian",
u"momentum spectrum",
u"electroweak interaction: radiative correction",
u"time delay",
u"lattice: anisotropy",
u"particle: massless",
u"symmetry: family",
u"homotopy",
u"positron: polarized beam",
u"binary: compact",
u"current",
u"gas electron multiplier",
u"renormalization: nonperturbative",
u"current algebra",
u"interface",
u"resistive plate chamber",
u"effect: nonlinear",
u"space: Minkowski",
u"CPT: violation",
u"gravitation: dilaton",
u"causality: violation",
u"zeta function",
u"LSP",
u"lifetime",
u"D",
u"threshold: correction",
u"differential cross section: x-dependence",
u"decay: flavor changing",
u"meson: hybrid",
u"flavor: 4",
u"Yang-Baxter equation",
u"galaxy: formation",
u"correction: relativistic",
u"upgrade",
u"family",
u"potential: thermodynamical",
u"glueball: mass",
u"neutrino/e",
u"Gross-Neveu model",
u"spin: density matrix",
u"hierarchy: gauge",
u"space-time: | |
def get_dataset(dataset, args):
    if dataset.lower() == "sim10k" or dataset.lower() == "kitti":
train_dataset = gdata.SIM10kDetection(root=args.train_root,
splits=args.train_split, min_dataset_size=args.min_dataset_size)
val_dataset = gdata.SIM10kDetection(root=args.val_root,
splits=args.val_split)
if args.target_split != "":
target_dataset = gdata.SIM10kDetection(root=args.target_root,
splits=args.target_split, min_dataset_size=args.min_dataset_size)
else:
target_dataset = None
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
elif dataset.lower() == 'coco':
train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
target_dataset = None
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
return train_dataset, val_dataset, target_dataset, val_metric
def get_dataloader(net, train_dataset, val_dataset, target_dataset, batch_size, num_workers, args):
"""Get dataloader."""
train_bfn = batchify.Tuple(*[batchify.Append() for _ in range(5)])
train_loader = mx.gluon.data.DataLoader(
train_dataset.transform(
FasterRCNNDefaultTrainTransform(net.short, net.max_size, net, augmentation=args.augmentation)),
batch_size, True, batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
if target_dataset is None:
target_loader = None
else:
target_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)])
target_loader = mx.gluon.data.DataLoader(
target_dataset.transform(
FasterRCNNDefaultSEMultiTeacherTransform(net.short, net.max_size, net, teacher_num=args.num_teacher,
teacher_aug=args.teacher_aug)),
batch_size, True, batchify_fn=target_bfn, last_batch='rollover', num_workers=num_workers)
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
val_loader = mx.gluon.data.DataLoader(
val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),
batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
return train_loader, val_loader, target_loader
def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if current_map > best_map[0]:
logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format(
epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
best_map[0] = current_map
net.save_parameters('{:s}_best.params'.format(prefix))
with open(prefix + '_best_map.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
if save_interval and (epoch + 1) % save_interval == 0:
logger.info('[Epoch {}] Saving parameters to {}'.format(
epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
def split_and_load(batch, ctx_list):
"""Split data to 1 batch each device."""
num_ctx = len(ctx_list)
new_batch = []
for i, data in enumerate(batch):
new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
new_batch.append(new_data)
return new_batch
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
net.hybridize(False)
for batch in tqdm(val_data, total=len(val_data)):
batch = split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y, im_scale in zip(*batch):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(clipper(bboxes, x))
# rescale to original resolution
im_scale = im_scale.reshape((-1)).asscalar()
det_bboxes[-1] *= im_scale
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_bboxes[-1] *= im_scale
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes,
gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
return eval_metric.get()
def get_lr_at_iter(alpha):
    # Linear warm-up factor: returns 1/3 at alpha=0 and ramps to 1.0 at alpha=1.
    return 1. / 3. * (1 - alpha) + alpha
def consistent_loss(image_feature, region_feature, scale=0.1):
    # Penalize disagreement between region-level features and the image-level
    # mean activation; `scale` weights the squared difference.
    image_feature = nd.reshape(image_feature, shape=(0, -1))
    image_feature = nd.mean(image_feature, axis=1)
    distance = nd.square(region_feature - image_feature) * scale
    return distance
def train(student_list, teacher_list, train_data, val_data, target_data, eval_metric, ctx, args, se_opt_list):
"""Training pipeline"""
trainer_list = []
train_patterns = args.train_patterns
for student, teacher in zip(student_list, teacher_list):
student[0].collect_params().setattr('grad_req', 'null')
student[0].collect_train_params(train_patterns).setattr('grad_req', 'write')
teacher[0].collect_params().setattr('grad_req', 'null')
teacher[0].collect_train_params(train_patterns).setattr('grad_req', 'write')
for k, v in student[0].collect_params().items():
logger.info("all params:" + str(k))
for k, v in student[0].collect_train_params(train_patterns).items():
logger.info("training params:" + str(k))
trainer = gluon.Trainer(
student[0].collect_train_params(train_patterns), # fix batchnorm, fix first stage, etc...
'sgd',
{'learning_rate': args.lr,
'wd': args.wd,
'momentum': args.momentum,
'clip_gradient': 5})
trainer_list.append(trainer)
# lr decay policy
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
lr_warmup = float(args.lr_warmup) # avoid int division
# TODO(zhreshold) losses?
rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1 / 9.) # == smoothl1
rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
rcnn_box_loss = mx.gluon.loss.HuberLoss() # == smoothl1
metrics = [mx.metric.Loss('RPN_Conf'),
mx.metric.Loss('RPN_SmoothL1'),
mx.metric.Loss('RCNN_CrossEntropy'),
mx.metric.Loss('RCNN_SmoothL1'), ]
rpn_acc_metric = RPNAccMetric()
rpn_bbox_metric = RPNL1LossMetric()
rcnn_acc_metric = RCNNAccMetric()
rcnn_bbox_metric = RCNNL1LossMetric()
metrics2 = [rpn_acc_metric, rpn_bbox_metric, rcnn_acc_metric, rcnn_bbox_metric, ScalarMetric(name="rpn_se_cnt"),
ScalarMetric(name="rpn_se_loss"), ScalarMetric(name="rcnn_se_cnt"), ScalarMetric("rcnn_se_loss"),
ScalarMetric("wd loss"), ScalarMetric("similarity loss"), ScalarMetric("inside graph loss")]
logger.info(args)
logger.info("net have classes:{}".format(student_list[0][0].classes))
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
if args.validate_resume:
map_name, mean_ap = validate(teacher_list[0], val_data, ctx, eval_metric)
logger.info("validating resuming model")
logger.info(map_name)
logger.info(mean_ap)
box_to_center = None
box_decoder = None
clipper = None
if args.dense_regions:
raise NotImplementedError
best_map = [0]
data_size = len(train_data)
data_size = data_size * 1.0
for epoch in range(args.start_epoch, args.epochs):
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer_list[0].learning_rate * lr_decay
lr_steps.pop(0)
for trainer in trainer_list:
trainer.set_learning_rate(new_lr)
logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
for metric in metrics:
metric.reset()
tic = time.time()
btic = time.time()
for student in student_list:
if args.hybrid:
student.hybridize(static_alloc=True)
else:
student.hybridize(False)
for teacher in teacher_list:
if args.hybrid:
teacher.hybridize(static_alloc=True)
else:
teacher.hybridize(False)
base_lr = trainer_list[0].learning_rate
conf_thres = 0
if target_data is None:
target_data_size = len(train_data)
target_data = []
for _ in range(target_data_size):
target_data.append([nd.zeros((1,)), nd.zeros((1,))])
if len(train_data) != len(target_data):
logger.info(
"train data has: {} items but target data has: {} items, it would be better if they have the same number".format(
len(train_data), len(target_data)))
logger.info("training data has: {} items".format(min(len(train_data), len(target_data))))
for i, (batch, target_batch) in tqdm(enumerate(zip(train_data, target_data)),
total=min(len(train_data), len(target_data))):
if epoch == 0 and i <= lr_warmup:
# adjust based on real percentage
new_lr = base_lr * get_lr_at_iter(i / lr_warmup)
if new_lr != trainer_list[0].learning_rate:
if i % args.log_interval == 0:
logger.info('[Epoch 0 Iteration {}] Set learning rate to {}'.format(i, new_lr))
for trainer in trainer_list:
trainer.set_learning_rate(new_lr)
batch = split_and_load(batch, ctx_list=ctx)
if args.use_se:
target_batch_new = []
for target_batch_item1 in target_batch:
tmp_batch = []
for tmp_data in target_batch_item1:
tmp_data_reshape = nd.reshape(tmp_data, shape=(-1, 3, 0, 0))
tmp_batch.append(tmp_data_reshape)
target_batch_new.append(tmp_batch)
target_batch = target_batch_new
target_batch = split_and_load(target_batch, ctx_list=ctx)
else:
target_batch = [[nd.zeros(shape=(1,), ctx=ctx[ctx_idx]) for ctx_idx in range(len(ctx))],
[nd.zeros(shape=(1,), ctx=ctx[ctx_idx]) for ctx_idx in range(len(ctx))]]
batch_size = len(batch[0])
losses = []
metric_losses = [[] for _ in metrics]
add_losses = [[] for _ in metrics2]
with autograd.record():
for data, label, rpn_cls_targets, rpn_box_targets, rpn_box_masks, target_data_image_1, target_data_image_2 in zip(
*(batch + target_batch)):
gt_label = label[:, :, 4:5]
gt_box = label[:, :, :4]
mask_score = None
valid_index = 1.0
teacher_roi_list = None
if args.num_teacher > 1 and args.use_se:
raise NotImplementedError
idx_list = [k for k in range(args.num_teacher)]
for idx, student, teacher in zip(idx_list, student_list, teacher_list):
if args.student_se_loss:
raise NotImplementedError
cls_pred, box_pred, roi, samples, matches, rpn_score, rpn_box, anchors, _, _ = student[0](
data, gt_box, None)
if args.use_se:
target_data_image_t = target_data_image_1[idx:idx + 1, :, :, :]
cls_pred_1, _, roi_1, _, _, rpn_score_1, rpn_box_1, anchor_for_se, _, top_feature_1 = \
teacher[0](
target_data_image_t,
None,
teacher_roi_list)
target_data_image_t = target_data_image_2[idx:idx + 1, :, :, :]
if teacher_roi_list is None:
if args.dense_regions:
raise NotImplementedError
else:
teacher_roi_list = roi_1
if args.dense_regions:
raise NotImplementedError
cls_pred_2, _, roi_2, _, _, rpn_score_2, rpn_box_2, _, _, top_feature_2 = student[0](
target_data_image_t,
None,
teacher_roi_list)
# cls_pred_2 = nd.stop_gradient(cls_pred_2)
cls_pred_1_softmax = nd.softmax(cls_pred_1, axis=-1)
cls_pred_2_softmax = nd.softmax(cls_pred_2, axis=-1)
if not args.teacher_agree:
max_score = nd.max(cls_pred_1_softmax[:, :, 1:], axis=-1) * valid_index
if args.fixed_conf_thres:
conf_thres = args.base_conf_thres
else:
if args.continue_conf_thres:
conf_thres = 1. + epoch + 1.0 * i / data_size
else:
conf_thres = 1. + epoch
if args.linear_conf:
conf_thres = (conf_thres - 1) / args.conf_decay_epoch * (
1 - args.base_conf_thres) + args.base_conf_thres
else:
conf_thres = np.log(conf_thres) / np.log(args.conf_decay_epoch) * (
1 - args.base_conf_thres) + args.base_conf_thres
mask_score = max_score > conf_thres
conf_thres = min(conf_thres, args.max_conf_thres)
if args.similarity_weight > 0:
max_label = nd.argmax(cls_pred_1_softmax[:, :, :], axis=-1) * valid_index
max_label_info = nd.broadcast_equal(nd.transpose(max_label), max_label)
if args.similarity_feature == "prob":
raise NotImplementedError
elif args.similarity_feature == "visual":
sim_feature_1 = top_feature_1[:, :, 0, 0]
sim_feature_2 = top_feature_2[:, :, 0, 0]
else:
assert 1 == 0, "unsupport similarity feature {}".format(args.similarity_feature)
if args.similarity_metric == "cosine":
sim_feature_1_t = nd.L2Normalization(sim_feature_1, mode="instance")
sim_feature_2_t = nd.L2Normalization(sim_feature_2, mode="instance")
similarity_1 = nd.dot(sim_feature_1_t, nd.transpose(sim_feature_1_t))
similarity_2 = nd.dot(sim_feature_2_t, nd.transpose(sim_feature_2_t))
else:
assert 1 == 0, "unsupport similarity metric {}".format(args.similarity_metric)
if args.post_softmax:
similarity_1 = nd.softmax(similarity_1, axis=-1)
similarity_2 = nd.softmax(similarity_2, axis=-1)
if args.similarity_distance_metric == "l1":
raise NotImplementedError
elif args.similarity_distance_metric == "l2":
similarity_diff = nd.square(similarity_1 - similarity_2)
if args.similarity_metric == "l0" or args.similarity_metric == "l1" or args.similarity_metric == "l2" or args.similarity_metric == "id":
similarity_diff = nd.sum(similarity_diff, axis=2)
else:
assert 1 == 0, "unsupport similarity distance metric {}".format(
args.similarity_distance_metric)
similarity_mask = nd.dot(nd.transpose(mask_score), mask_score)
if args.similarity_mask_with_equal_label:
similarity_mask = similarity_mask * max_label_info + args.similarity_negative_weight * similarity_mask
if args.distance_based_inside_graph:
sim_feature_1 = top_feature_1[:, :, 0, 0]
sim_feature_2 = top_feature_2[:, :, 0, 0]
sim_feature_1_1 = nd.expand_dims(sim_feature_1, axis=1)
sim_feature_1_2 = nd.expand_dims(sim_feature_1, axis=0)
similarity_1 = nd.square(nd.broadcast_minus(sim_feature_1_1, sim_feature_1_2))
sim_feature_2_1 = nd.expand_dims(sim_feature_2, axis=1)
sim_feature_2_2 = nd.expand_dims(sim_feature_2, axis=0)
similarity_2 = nd.square(nd.broadcast_minus(sim_feature_2_1, sim_feature_2_2))
similarity_1_summed = nd.mean(similarity_1, axis=2)
similarity_2_summed = nd.mean(similarity_2, axis=2)
inside_graph_loss_1 = similarity_1_summed * max_label_info + (
1 - max_label_info) * nd.relu(5 - similarity_1_summed)
inside_graph_loss_2 = similarity_2_summed * max_label_info + (
                                1 - max_label_info) * nd.relu(5 - similarity_2_summed)
import json
from enum import Enum
import requests
from requests import Response
import os
from crawler.crawler_bis.fdutils import string_or_path, web_downloader
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
class FlickrPrivacyFilter(Enum):
default = u""
public = u"&privacy_filter=1"
friends = u"&privacy_filter=2"
family = u"&privacy_filter=3"
friends_family = u"&privacy_filter=4"
private = u"&privacy_filter=5"
class FlickrSafeSearch(Enum):
default = u"&safe_search=1"
class FlickrContentType(Enum):
default = u""
photos = u"&content_type=1"
screenshoots = u"&content_type=2"
other = u"&content_type=3"
photos_screenshots = u"&content_type=4"
screenshoots_other = u"&content_type=5"
photos_other = u"&content_type=6"
photos_screenshots_other = u"&content_type=7"
class FlickrTagMode(Enum):
default = u""
any = u'&tag_mode=any' # logic OR
all = u'&tag_mode=all' # logic AND
class FlickrMedia(Enum):
default = u""
photos = u"&media=photos"
videos = u"&media=videos"
class FlickrResponseFormat(Enum):
JSON = u"&format=json&nojsoncallback=1"
JSONP = u"&format=json"
XML = u"&format=rest"
PHP_SERIAL = u"&format=php_serial"
default = JSON
class FlickrImageSize(Enum):
default = u""
square_75x75 = u"_s" # 75 x 75
square_150x150 = u"_q" # 150 x 150
longedge_100 = u"_t" # 100 on the longest edge
longedge_240 = u"_m" # 240 on the longest edge
longedge_320 = u"_n" # 320 on the longest edge
longedge_500 = u"_-" # 500 on the longest edge
longedge_640 = u"_z" # 640 on the longest edge
# 800 on the longest edge (flickr new feature from 01/03/2012)
longedge_800 = u"_c"
longedge_1024 = u"_b" # 1024 on the longest edge
# 1600 on the longest edge (flickr new feature from 01/03/2012)
longedge_1600 = u"_h"
# 2048 on the longest edge (flickr new feature from 01/03/2012)
longedge_2048 = u"_k"
# class FlickrImageExtension(Enum):
# jpg = "jpg"
# png = "png"
# gif = "gif"
# default = jpg
def flickr_photos_search(api_key_or_file_path, # type: unicode
n_images=100, # type: int
query_text=None, # type: unicode
tags=None, # type: unicode
tag_mode=FlickrTagMode.default, # type: FlickrTagMode
content_type=FlickrContentType.default, # type: FlickrContentType
safe_search=FlickrSafeSearch.default,
media=FlickrMedia.default, # type: FlickrMedia
response_format=FlickrResponseFormat.JSON, # type: FlickrResponseFormat
license_id=None,
camera=None
):
# type: (unicode, int, unicode, unicode, FlickrTagMode, FlickrContentType, FlickrMedia, FlickrResponseFormat) -> list(Response)
"""
:rtype: list(Response)
"""
MAX_IMAGES_PER_PAGE = 500 # fixed by flickr api
if isinstance(api_key_or_file_path, str):
api_key_or_file_path = str(api_key_or_file_path)
if not isinstance(api_key_or_file_path, str):
raise ValueError(
u"api_key_or_file_path must be a unicode (flickr api key or path to text file containing key).")
api_key = string_or_path(api_key_or_file_path).split(" ")[0].split("\n")[0]
query = u"https://api.flickr.com/services/rest/?method=flickr.photos.search"
query += u"&api_key=" + api_key
if camera is None:
camera_str = ''
else:
camera_str = camera
if isinstance(query_text, str):
query_text = str(query_text)
if isinstance(tags, str):
tags = str(tags)
# if isinstance(query_text, unicode):
if query_text is not None:
query += camera_str + u"&text=" + quote(query_text.encode('utf-8'))
# if isinstance(tags, unicode):
if tags is not None:
query += u"&tags=" + \
quote(tags.encode('utf-8')) + tag_mode.value
# query.replace(" ", "%20")
query += content_type.value + safe_search.value + media.value + \
response_format.value + FlickrPrivacyFilter.public.value
if license_id is not None:
query += u"&license_id=" + str(license_id)
print(query)
rest = n_images % MAX_IMAGES_PER_PAGE
n_queries = int(n_images / MAX_IMAGES_PER_PAGE)
query_len_list = [MAX_IMAGES_PER_PAGE] * n_queries
if rest > 0:
query_len_list.append(rest)
responses = []
for i, query_len in enumerate(query_len_list):
page_query = query + u"&per_page=" + \
str(query_len) + u"&page=" + str(i + 1)
responses.append(requests.get(page_query))
return responses
def flickr_photos_links(api_key_or_file_path, # type: unicode
n_images=100, # type: int
query_text=None, # type: unicode
tags=None, # type: unicode
image_size=FlickrImageSize.default, # type: FlickrImageSize
tag_mode=FlickrTagMode.default, # type: FlickrTagMode
content_type=FlickrContentType.default, # type: FlickrContentType
media=FlickrMedia.default, # type: FlickrMedia
license_id=None,
verbose=False,
ignore_errors=False,
max_error_retries=3,
camera=None
):
# type: (...) -> list(unicode)
retry = 0
links = []
while retry < max_error_retries:
links = []
try:
responses = flickr_photos_search(api_key_or_file_path=api_key_or_file_path, n_images=n_images,
query_text=query_text, tags=tags, tag_mode=tag_mode,
content_type=content_type, media=media,
response_format=FlickrResponseFormat.JSON, license_id=license_id,
camera=camera
)
for response in responses:
if response.ok:
content = response.content
data = json.loads(content)
if 'photos' in data.keys():
data = data['photos']['photo']
for d in data:
# https://farm{farm-id}.staticflickr.com/{server-id}/{id}_{secret}_[mstzb].jpg
lnk = u"https://farm{}.staticflickr.com/{}/{}_{}{}.jpg"\
.format(d['farm'], d['server'], d['id'], d['secret'], image_size.value)
links.append(lnk)
else:
if not ignore_errors:
print(
u"Format error in received json (can't find key 'photos').")
if 'message' in data.keys():
print(u"Received Message: {}".format(
data['message'].encode("utf-8")))
else:
if not ignore_errors:
print(u"Flickr response not ok.")
if verbose:
print(u"Links retrived from flickr responses: {}".format(len(links)))
return links
        except ValueError as v:
            retry += 1
            if verbose or not ignore_errors:
                print(u"Value Error in flickr_photos_links process:")
                print(str(v))
            if retry < max_error_retries:
                print(u"Retry {}".format(retry))
return links
def flickr_photos_downloader(api_key_or_file_path, # type: unicode
n_images=100, # type: int
query_text=None, # type: unicode
tags=None, # type: unicode
tag_mode=FlickrTagMode.default, # type: FlickrTagMode
image_size=FlickrImageSize.default, # type: FlickrImageSize
content_type=FlickrContentType.default, # type: FlickrContentType
media=FlickrMedia.default, # type: FlickrMedia
license_id=None,
download_path=u"",
save_filename_prefix=u"flickr_",
forced_extension=None,
verbose=False,
ignore_errors=False,
max_error_retries=3,
camera=None
):
# type: (...) -> list(unicode)
if os.path.isfile('../links-selfie-6000.txt'):
print("links file found")
with open('../links-selfie-6000.txt', 'r') as file:
links = file.read().split('\n')
else:
print("links not found; loading and writing to text file")
links = flickr_photos_links(api_key_or_file_path=api_key_or_file_path, query_text=query_text, tags=tags, n_images=n_images,
image_size=image_size, tag_mode=tag_mode, content_type=content_type, media=media,
verbose=verbose, ignore_errors=ignore_errors, license_id=license_id, max_error_retries=max_error_retries, camera=camera)
with open('../links-selfie-6000.txt', 'w') as file:
file.write("\n".join(links))
for i in range(len(links)):
links[i] = links[i].replace('_h', '_b')
links = list(set(links))
print(len(links))
web_downloader(link_list=links, download_path=download_path, k=0, save_filename_prefix=save_filename_prefix,
forced_extension=forced_extension, verbose=verbose, ignore_errors=ignore_errors)
return links
# Flickr Documentations:
# For search:
# https://www.flickr.com/services/api/explore/flickr.photos.search
# (use public = 1 in the query!)
# (log in to Flickr to automatically insert your API key).
#
#
# To download images, look at here: https://www.flickr.com/services/api/misc.urls.html
# example: http://farm3.staticflickr.com/2636/32179988483_cd41d8fca9_b.jpg
#
# Otherwise you can use getSize query and use the source response to get the direct link:
# http://www.flickr.com/services/api/flickr.photos.getSizes.html
#
# * * * * * * * * * * * * * * * * * * * SEARCH API * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# SEARCH API ( https://www.flickr.com/services/api/flickr.photos.search.html )
# EXAMPLE OF QUERY:
# https://api.flickr.com/services/rest/?method=flickr.photos.search
# &api_key=API_KEY
# &tags=art
# &text=david
# &per_page=100
# &page=1
# &format=json
# tags (Not mandatory)
# A comma-delimited list of tags. Photos with one or more of the tags listed will be returned. You can exclude results
# that match a term by prepending it with a - character.
# tag_mode (Not mandatory)
# Either 'any' for an OR combination of tags, or 'all' for an AND combination. Defaults to 'any' if not specified.
# text (Not mandatory)
# A free text search. Photos whose title, description or tags contain the text will be returned. You can exclude results
# that match a term by prepending it with a - character.
# sort (Not mandatory)
# The order in which to sort returned photos. Defaults to date-posted-desc (unless you are doing a radial geo query, in
# which case the default sorting is by ascending distance from the point specified). The possible values are:
# date-posted-asc,
# date-posted-desc,
# date-taken-asc,
# date-taken-desc,
# interestingness-desc,
# interestingness-asc,
# relevance.
# privacy_filter (Not mandatory)
# Return photos only matching a certain privacy level.
# This only applies when making an authenticated call to view photos you own.
# Valid values are:
# 1 public photos
# 2 private photos visible to friends
# 3 private photos visible to family
# 4 private photos visible to friends & family
# 5 completely private photos
# content_type (Not mandatory)
# Content Type setting:
# 1 for photos only.
# 2 for screenshots only.
# 3 for 'other' only.
# 4 for photos and screenshots.
# 5 for screenshots and 'other'.
# 6 for photos and 'other'.
# 7 for photos, screenshots, and 'other' (all).
# media (Not mandatory)
# Filter results by media type. Possible values are all (default), photos or videos
# per_page (Not mandatory)
# Number of photos to return per page. If this argument is omitted, it defaults to 100. The maximum allowed value is 500.
# page (Not mandatory)
# The page of results to return. If this argument is omitted, it defaults to 1.
# FOR LOCALIZATION:
# geo_context (Not mandatory)
# Geo context is a numeric value representing the photo's geotagginess beyond latitude and longitude.
# For example, you may wish to search for photos that were taken "indoors" or "outdoors".
# The current list of context IDs is :
# 0, not defined.
# 1, indoors.
# 2, outdoors.
# Geo queries require some sort of limiting agent in order to prevent the database from crying.
# This is basically like the check against "parameterless searches" for queries without a geo component.
#
# A tag, for instance, is considered a limiting agent as are user defined min_date_taken and min_date_upload parameters
# If no limiting factor is passed we return only photos added in the last 12 hours (though we may extend the limit in the future).
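#
# --- Illustrative usage sketch (not part of the original module; the key-file path
# and search terms below are assumptions). It shows how the query parameters
# documented above map onto the keyword arguments of flickr_photos_links().
if __name__ == "__main__":
    example_links = flickr_photos_links(
        api_key_or_file_path=u"flickr_api_key.txt",   # assumed path to a file holding the API key
        n_images=50,                                  # paginated internally (at most 500 per page)
        query_text=u"selfie",                         # free-text search ("text" parameter)
        tags=u"portrait,person",                      # comma-delimited tag list
        tag_mode=FlickrTagMode.any,                   # OR combination of the tags
        image_size=FlickrImageSize.longedge_1024,     # appends the "_b" suffix to photo URLs
        content_type=FlickrContentType.photos,        # photos only (content_type=1)
        media=FlickrMedia.photos,                     # media=photos
        verbose=True)
    print(u"{} links retrieved".format(len(example_links)))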
# src/quo/layout/containers.py
"""
Container for the layout.
(Containers can contain other containers or user interface controls.)
"""
from abc import ABCMeta, abstractmethod
from enum import Enum
from functools import partial
from typing import (
TYPE_CHECKING,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from quo.suite.current import get_app
from quo.cache import SimpleCache
from quo.data_structures import Point
from quo.filters import (
FilterOrBool,
emacs_insert_mode,
to_filter,
vi_insert_mode,
)
from quo.text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from quo.text.utils import (
fragment_list_to_text,
fragment_list_width,
)
from quo.keys.key_binding import KeyBindingsBase
from quo.mouse_events import MouseEvent, MouseEventType
from quo.utils.utils import get_width as get_cwidth
from quo.utils.utils import take_using_weights, to_int, to_str
from .controls import (
DummyControl,
FormattedTextControl,
GetLinePrefixCallable,
UIContent,
UIControl,
)
from .dimension import (
AnyDimension,
Dimension,
max_layout_dimensions,
sum_layout_dimensions,
to_dimension,
)
from .margins import Margin
from .mouse_handlers import MouseHandlers
from .screen import _CHAR_CACHE, Screen, WritePosition
from .utils import explode_text_fragments
if TYPE_CHECKING:
from typing_extensions import Protocol
NotImplementedOrNone = object
__all__ = [
"AnyContainer",
"Container",
"HorizontalAlign",
"VerticalAlign",
"HSplit",
"VSplit",
"FloatContainer",
"Float",
"WindowAlign",
"Window",
"WindowRenderInfo",
"ConditionalContainer",
"ScrollOffsets",
"ColorColumn",
"to_container",
"to_window",
"is_container",
"DynamicContainer",
]
class Container(metaclass=ABCMeta):
"""
Base class for user interface layout.
"""
@abstractmethod
def reset(self) -> None:
"""
Reset the state of this container and all the children.
(E.g. reset scroll offsets, etc...)
"""
@abstractmethod
def preferred_width(self, max_available_width: int) -> Dimension:
"""
Return a :class:`~quo.layout.Dimension` that represents the
desired width for this container.
"""
@abstractmethod
def preferred_height(self, width: int, max_available_height: int) -> Dimension:
"""
Return a :class:`~quo.layout.Dimension` that represents the
desired height for this container.
"""
@abstractmethod
def write_to_screen(
self,
screen: Screen,
mouse_handlers: MouseHandlers,
write_position: WritePosition,
parent_style: str,
erase_bg: bool,
z_index: Optional[int],
) -> None:
"""
Write the actual content to the screen.
:param screen: :class:`~quo.layout.screen.Screen`
:param mouse_handlers: :class:`~quo.layout.mouse_handlers.MouseHandlers`.
:param parent_style: Style string to pass to the :class:`.Window`
object. This will be applied to all content of the windows.
:class:`.VSplit` and :class:`.HSplit` can use it to pass their
style down to the windows that they contain.
:param z_index: Used for propagating z_index from parent to child.
"""
def is_modal(self) -> bool:
"""
When this container is modal, key bindings from parent containers are
not taken into account if a user control in this container is focused.
"""
return False
def get_key_bindings(self) -> Optional[KeyBindingsBase]:
"""
Returns a :class:`.KeyBinder` object. These bindings become active when any
user control in this container has the focus, except if any containers
between this container and the focused user control is modal.
"""
return None
@abstractmethod
def get_children(self) -> List["Container"]:
"""
Return the list of child :class:`.Container` objects.
"""
return []
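# --- Illustrative sketch (not part of the original module): a minimal Container
# implementation. It reserves a small preferred size and draws nothing; a real
# container would render its content into `screen` inside write_to_screen().
class _EmptyBox(Container):
    def reset(self) -> None:
        pass  # no internal state (e.g. scroll offsets) to reset

    def preferred_width(self, max_available_width: int) -> Dimension:
        return Dimension(preferred=10)

    def preferred_height(self, width: int, max_available_height: int) -> Dimension:
        return Dimension(preferred=1)

    def write_to_screen(self, screen, mouse_handlers, write_position,
                        parent_style, erase_bg, z_index) -> None:
        pass  # intentionally draws nothing

    def get_children(self) -> List["Container"]:
        return []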
if TYPE_CHECKING:
class MagicContainer(Protocol):
"""
Any object that implements ``__pt_container__`` represents a container.
"""
def __pt_container__(self) -> "AnyContainer":
...
AnyContainer = Union[Container, "MagicContainer"]
def _window_too_small() -> "Window":
"Create a `Window` that displays the 'Window too small' text."
return Window(
FormattedTextControl(text=[("class:window-too-small", "𝚃𝚑𝚎 𝚠𝚒𝚗𝚍𝚘𝚠 𝚒𝚜 𝚝𝚘𝚘 𝚜𝚖𝚊𝚕𝚕,\n 𝚣𝚘𝚘𝚖 𝚘𝚞𝚝...⏳")])
)
class VerticalAlign(Enum):
"Alignment for `HSplit`."
TOP = "TOP"
CENTER = "CENTER"
BOTTOM = "BOTTOM"
JUSTIFY = "JUSTIFY"
class HorizontalAlign(Enum):
"Alignment for `VSplit`."
LEFT = "LEFT"
CENTER = "CENTER"
RIGHT = "RIGHT"
JUSTIFY = "JUSTIFY"
class _Split(Container):
"""
The common parts of `VSplit` and `HSplit`.
"""
def __init__(
self,
subset: Sequence[AnyContainer],
window_too_small: Optional[Container] = None,
padding: AnyDimension = Dimension.exact(0),
padding_char: Optional[str] = None,
padding_style: str = "",
width: AnyDimension = None,
height: AnyDimension = None,
z_index: Optional[int] = None,
modal: bool = False,
bind: Optional[KeyBindingsBase] = None,
style: Union[str, Callable[[], str]] = "",
) -> None:
self.subset = [to_container(c) for c in subset]
self.window_too_small = window_too_small or _window_too_small()
self.padding = padding
self.padding_char = padding_char
self.padding_style = padding_style
self.width = width
self.height = height
self.z_index = z_index
self.modal = modal
self.bind = bind
self.style = style
def is_modal(self) -> bool:
return self.modal
def get_key_bindings(self) -> Optional[KeyBindingsBase]:
return self.bind
def get_children(self) -> List[Container]:
return self.subset
class HSplit(_Split):
"""
Several layouts, one stacked above/under the other. ::
+--------------------+
| |
+--------------------+
| |
+--------------------+
By default, this doesn't display a horizontal line between the children,
but if this is something you need, then create a HSplit as follows::
HSplit(subset=[ ... ], padding_char='-',
padding=1, padding_style='green')
:param subset: List of child :class:`.Container` objects.
:param window_too_small: A :class:`.Container` object that is displayed if
        there is not enough space for all the subsets. By default, this is a
"Window too small" message.
:param align: `VerticalAlign` value.
:param width: When given, use this width instead of looking at the children.
:param height: When given, use this height instead of looking at the children.
:param z_index: (int or None) When specified, this can be used to bring
element in front of floating elements. `None` means: inherit from parent.
:param style: A style string.
:param modal: ``True`` or ``False``.
:param bind: ``None`` or a :class:`.KeyBinder` object.
:param padding: (`Dimension` or int), size to be used for the padding.
:param padding_char: Character to be used for filling in the padding.
:param padding_style: Style to applied to the padding.
"""
def __init__(
self,
subset: Sequence[AnyContainer],
window_too_small: Optional[Container] = None,
align: VerticalAlign = VerticalAlign.JUSTIFY,
padding: AnyDimension = 0,
padding_char: Optional[str] = None,
padding_style: str = "",
width: AnyDimension = None,
height: AnyDimension = None,
z_index: Optional[int] = None,
modal: bool = False,
bind: Optional[KeyBindingsBase] = None,
style: Union[str, Callable[[], str]] = "",
) -> None:
super().__init__(
subset=subset,
window_too_small=window_too_small,
padding=padding,
padding_char=padding_char,
padding_style=padding_style,
width=width,
height=height,
z_index=z_index,
modal=modal,
bind=bind,
style=style,
)
self.align = align
self._subset_cache: SimpleCache[
Tuple[Container, ...], List[Container]
] = SimpleCache(maxsize=1)
self._remaining_space_window = Window() # Dummy window.
def preferred_width(self, max_available_width: int) -> Dimension:
if self.width is not None:
return to_dimension(self.width)
if self.subset:
dimensions = [c.preferred_width(max_available_width) for c in self.subset]
return max_layout_dimensions(dimensions)
else:
return Dimension()
def preferred_height(self, width: int, max_available_height: int) -> Dimension:
if self.height is not None:
return to_dimension(self.height)
dimensions = [
c.preferred_height(width, max_available_height) for c in self._all_subset
]
return sum_layout_dimensions(dimensions)
def reset(self) -> None:
for c in self.subset:
c.reset()
@property
def _all_subset(self) -> List[Container]:
"""
List of child objects, including padding.
"""
def get() -> List[Container]:
result: List[Container] = []
# Padding Top.
if self.align in (VerticalAlign.CENTER, VerticalAlign.BOTTOM):
result.append(Window(width=Dimension(preferred=0)))
# The children with padding.
for child in self.subset:
result.append(child)
result.append(
Window(
height=self.padding,
char=self.padding_char,
style=self.padding_style,
)
)
if result:
result.pop()
            # Padding bottom.
if self.align in (VerticalAlign.CENTER, VerticalAlign.TOP):
result.append(Window(width=Dimension(preferred=0)))
return result
return self._subset_cache.get(tuple(self.subset), get)
def write_to_screen(
self,
screen: Screen,
mouse_handlers: MouseHandlers,
write_position: WritePosition,
parent_style: str,
erase_bg: bool,
z_index: Optional[int],
) -> None:
"""
Render the prompt to a `Screen` instance.
:param screen: The :class:`~quo.layout.screen.Screen` class
to which the output has to be written.
"""
sizes = self._divide_heights(write_position)
style = parent_style + " " + to_str(self.style)
z_index = z_index if self.z_index is None else self.z_index
if sizes is None:
self.window_too_small.write_to_screen(
screen, mouse_handlers, write_position, style, erase_bg, z_index
)
else:
#
ypos = write_position.ypos
xpos = write_position.xpos
width = write_position.width
# Draw child panes.
for s, c in zip(sizes, self._all_subset):
c.write_to_screen(
screen,
mouse_handlers,
WritePosition(xpos, ypos, width, s),
style,
erase_bg,
z_index,
)
ypos += s
# Fill in the remaining space. This happens when a child control
# refuses to take more space and we don't have any padding. Adding a
        # dummy child control for this (in `self._all_subset`) is not
# desired, because in some situations, it would take more space, even
# when it's not required. This is required to apply the styling.
remaining_height = write_position.ypos + write_position.height - ypos
if remaining_height > 0:
self._remaining_space_window.write_to_screen(
screen,
mouse_handlers,
WritePosition(xpos, ypos, width, remaining_height),
style,
erase_bg,
z_index,
)
def _divide_heights(self, write_position: WritePosition) -> Optional[List[int]]:
"""
Return the heights for all rows.
Or None when there is not enough space.
"""
if not self.subset:
return []
width = write_position.width
height = write_position.height
# Calculate heights.
dimensions = [c.preferred_height(width, height) for c in self._all_subset]
# Sum dimensions
sum_dimensions = sum_layout_dimensions(dimensions)
# If there is not enough space for both.
# Don't do anything.
if sum_dimensions.min > height:
return None
# Find optimal sizes. (Start with minimal size, increase until we cover
# the whole height.)
sizes = [d.min for d in dimensions]
child_generator = take_using_weights(
items=list(range(len(dimensions))), weights=[d.weight for d in dimensions]
)
i = next(child_generator)
# Increase until we meet at least the 'preferred' size.
preferred_stop = min(height, sum_dimensions.preferred)
        preferred_dimensions = [d.preferred for d in dimensions]
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 17:02:37 2018
@author: shenzx
"""
from __future__ import print_function
import warnings
import shutil
from os.path import join
import numpy as np
from ase.io.abacus import write_input_stru
from ase.calculators.calculator import FileIOCalculator
# copyright © Key Lab of Quantum Information, CAS, China
"""This module defines an ASE interface to ABACUS
Developed on the basis of modules by <NAME>.
The path of the directory containing the
pseudopotential and basis directories (LDA, PBE, SG15, ORBITAL, ...)
should be set by the environment flags $ABACUS_PP_PATH and $ABACUS_ORBITAL_PATH.
The user should also set the environment flag
$ABACUS_SCRIPT pointing to a python script looking
like::
import os
exitcode = os.system('abacus')
http://abacus.ustc.edu.cn/
"""
# Parameters list that can be set in INPUT. -START-
# 1
general_keys = [
'suffix', # the name of main output directory
'latname', # the name of lattice name
'atom_file', # the filename of file containing atom positions
'kpoint_file', # the name of file containing k points
'pseudo_dir', # the directory containing pseudo files
'pseudo_type', # the type pseudo files
'dft_functional', # exchange correlation functional
'calculation', # test; scf; relax; nscf; ienvelope; istate;
'ntype', # atom species number
'nspin', # 1: single spin; 2: up and down spin;
'nbands', # number of bands
    'nbands_istate',  # number of bands around Fermi level for istate calculation
'symmetry', # turn symmetry on or off
'nelec' # input number of electrons
]
# 2
pw_keys = [
'ecutwfc', # energy cutoff for wave functions
    'ethr',  # threshold for eigenvalues in cg electron iterations
'dr2', # charge density error
'start_wfc', # start wave functions are from 'atomic' or 'file'
'start_charge', # start charge is from 'atomic' or file
'charge_extrap', # atomic; first-order; second-order; dm:coefficients of SIA
'out_charge', # >0 output charge density for selected electron steps
'out_potential', # output realspace potential
'out_wf', # output wave functions
'out_dos', # output energy and dos
'out_band', # output energy and band structure
'nx', # number of points along x axis for FFT grid
'ny', # number of points along y axis for FFT grid
'nz' # number of points along z axis for FFT grid
]
# 3
relaxation_keys = [
'ks_solver', # cg; david; lapack; genelpa; hpseps;
'niter', # number of electron iterations
'vna', # use the vna or not
'grid_speed', # 1:normal 2:fast
'force_set', # output the force_set or not
'force', # calculate the force
'nstep', # number of ion iteration steps
'out_stru', # output the structure files after each ion step
'force_thr', # force threshold, unit: Ry/Bohr
'force_thr_ev', # force threshold, unit: eV/Angstrom
'force_thr_ev2', # force invalid threshold, unit: eV/Angstrom
'stress_thr', # stress threshold
'press1', # target pressure, unit: KBar
'press2', # target pressure, unit: KBar
'press3', # target pressure, unit: KBar
'bfgs_w1', # wolfe condition 1 for bfgs
'bfgs_w2', # wolfe condition 2 for bfgs
'trust_radius_max', # maximal trust radius, unit: Bohr
'trust_radius_min', # minimal trust radius, unit: Bohr
'trust_radius_ini', # initial trust radius, unit: Bohr
'stress', # calculate the stress or not
'fixed_axes', # which axes are fixed
'move_method', # bfgs; sd; cg; cg_bfgs;
'out_level', # ie(for electrons); i(for ions);
'out_dm' # >0 output density matrix
]
# 4
lcao_keys = [
'basis_type', # PW; LCAO in pw; LCAO
'search_radius', # input search radius (Bohr)
'search_pbc', # input periodic boundary condition
'lcao_ecut', # energy cutoff for LCAO
'lcao_dk', # delta k for 1D integration in LCAO
'lcao_dr', # delta r for 1D integration in LCAO
'lcao_rmax', # max R for 1D two-center integration table
'out_hs', # output H and S matrix
'out_lowf', # ouput LCAO wave functions
'bx', # division of an element grid in FFT grid along x
'by', # division of an element grid in FFT grid along y
'bz' # division of an element grid in FFT grid along z
]
# 5
smearing_keys = [
'smearing', # type of smearing: gauss; fd; fixed; mp; mp2
'sigma' # energy range for smearing
]
# 6
charge_mixing_keys = [
'mixing_type', # plain; kerker; pulay; pulay-kerker
'mixing_beta', # mixing parameter: 0 means no new charge
'mixing_ndim', # mixing dimension in pulay
'mixing_gg0' # mixing parameter in kerker
]
# 7
dos_keys = [
'dos_emin_ev', # minimal range for dos
'dos_emax_ev', # maximal range for dos
'dos_edelta_ev', # delta energy for dos
    'dos_sigma'  # gauss broadening coefficient (default=0.07)
]
# 8
technique_keys = [
'gamma_only', # gamma only
'diago_proc', # number of proc used to diago
'npool', # number of pools for k points, pw only
'sparse_matrix', # use sparse matrix, in DMM
'atom_distribution', # distribute atoms, in DMM
'mem_saver', # memory saver for many k points used
'printe' # print band energy for selectively ionic steps
]
# 9
siao_keys = [
'selinv_npole', # number of selected poles
'selinv_temp', # temperature for Fermi-Dirac distribution
'selinv_gap', # supposed gap in the calculation
'selinv_deltae', # expected energy range
'selinv_mu', # chosen mu as Fermi energy
'selinv_threshold', # threshold for calculated electron number
'selinv_niter', # max number of steps to update mu
]
# 10
molecular_dynamics_keys = [
'md_mdtype', # choose ensemble
'md_dt', # time step
'md_nresn', # parameter during integrater
'md_nyosh', # parameter during integrater
'md_qmass', # mass of thermostat
'md_tfirst', # temperature first
'md_tlast', # temperature last
'md_dumpmdfred', # The period to dump MD information for monitoring and restarting MD
'md_mdoutpath', # output path of md
'md_domsd', # whether compute <r(t)-r(0)>
'md_domsdatom', # whether compute msd for each atom
'md_rstmd', # whether restart
'md_fixtemperature', # period to change temperature
'md_ediff', # parameter for constraining total energy change
'md_ediffg', # parameter for constraining max force change
'md_msdstarttime' # choose which step that msd be calculated
]
# 11
efield_keys = [
'efield', # add electric field
'edir', # add electric field
'emaxpos', # maximal position of efield [0, 1
'eopreg', # where sawlike potential decrease
'eamp', # amplitute of the efield, unit is a.u.
'eamp_v' # amplitute of the efield, unit is V/A
]
# 12
bfield_keys = [
'bfield', # add magnetic field
'bfield_teslax', # magnetic field strength
'bfield_teslay', # magnetic field strength
'bfield_teslaz', # magnetic field strength
'bfield_gauge_x', # magnetic field gauge origin
'bfield_gauge_y', # magnetic field gauge origin
'bfield_gauge_z' # magnetic field gauge origin
]
# 13
test_keys = [
'out_alllog', # output information for each processor, when parallel
'nurse', # for coders
'colour', # for coders, make their live colourful
't_in_h', # calculate the kinetic energy or not
'vl_in_h', # calculate the local potential or not
'vnl_in_h', # calculate the nonlocal potential or not
'zeeman_in_h', # calculate the zeeman term or not
'test_force', # test the force
'test_stress' # test the force
]
# 14
other_methods_keys = [
'mlwf_flag', # turn MLWF on or off
'opt_epsilon2', # calculate the dielectic function
'opt_nbands' # number of bands for optical calculation
]
# 15
vdw_d2_keys = [
'vdwD2', # calculate vdw-D2 or not
'vdwD2_scaling', # scaling of vdw-D2
'vdwD2_d', # damping parameter
'vdwD2_C6_file', # filename of C6
'vdwD2_C6_unit', # unit of C6, Jnm6/mol or eVA6
'vdwD2_R0_file', # filename of R0
'vdwD2_R0_unit', # unit of R0, A or Bohr
'vdwD2_model', # expression model of periodic structure, radius or period
'vdwD2_radius', # radius cutoff for periodic structure
'vdwD2_radius_unit', # unit of radius cutoff for periodic structure
'vdwD2_period' # periods of periodic structure
]
# 16
spectrum_keys = [
'spectral_type', # the type of the calculated spectrum
'spectral_method', # 0: tddft(linear response)
'kernel_type', # the kernel type: rpa, tdlda ...
'eels_method', # 0: hilbert_transform method; 1: standard method
'absorption_method', # 0: vasp's method 1: pwscf's method
'system', # the calculate system
'eta', # eta(Ry)
'domega', # domega(Ry)
'nomega', # nomega
'ecut_chi', # the dimension of chi matrix
'q_start', # the position of the first q point in direct coordinate
'q_direction', # the q direction
'nq', # the total number of qpoints for calculation
'out_epsilon', # output epsilon or not
'out_chi', # output chi or not
'out_chi0', # output chi0 or not
'fermi_level', # the change of the fermi_level(Ry)
'coulomb_cutoff', # turn on the coulomb_cutoff or not
'kmesh_interpolation', # calculting <i, 0|j, R>
'qcar', # (unit: 2PI/lat0)
    'lcao_box',  # the scale
import json
import logging
import re
import time
import os
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
def enable_logging():
out_hdlr = logging.FileHandler('leetcode-vim.log')
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
def _make_headers():
assert is_login()
headers = {'Origin': LC_BASE,
'Referer': LC_BASE,
'X-CSRFToken': session.cookies['csrftoken'],
'X-Requested-With': 'XMLHttpRequest'}
return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
lines = _break_code_lines(s)
result = []
# reserve one and only one empty line between two non-empty lines
for line in lines:
if line.strip() != '': # a line with only whitespaces is also empty
result.append(line)
result.append('')
return result
def _remove_description(code):
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
def is_login():
return session and 'LEETCODE_SESSION' in session.cookies
def signin(username, password):
global session
session = requests.Session()
if 'cn' in LC_BASE:
res = session.get(LC_CSRF)
else:
res = session.get(LC_LOGIN)
if res.status_code != 200:
_echoerr('cannot open ' + LC_BASE)
return False
headers = {'Origin': LC_BASE,
'Referer': LC_LOGIN}
form = {'csrfmiddlewaretoken': session.cookies['csrftoken'],
'login': username,
'password': password}
log.info('signin request: headers="%s" login="%s"', headers, username)
# requests follows the redirect url by default
# disable redirection explicitly
res = session.post(LC_LOGIN, data=form, headers=headers, allow_redirects=False)
log.info('signin response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 302:
_echoerr('password incorrect')
return False
return True
def _get_category_problems(category):
headers = _make_headers()
url = LC_CATEGORY_PROBLEMS.format(category=category)
res = session.get(url, headers=headers)
if res.status_code != 200:
_echoerr('cannot get the category: {}'.format(category))
return []
problems = []
content = res.json()
for p in content['stat_status_pairs']:
# skip hidden questions
if p['stat']['question__hide']:
continue
problem = {'state': _state_to_flag(p['status']),
'id': p['stat']['question_id'],
'fid': p['stat']['frontend_question_id'],
'title': p['stat']['question__title'],
'slug': p['stat']['question__title_slug'],
'paid_only': p['paid_only'],
'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
'level': _level_to_name(p['difficulty']['level']),
'favor': p['is_favor'],
'category': content['category_slug'],
'frequency': p['frequency']}
problems.append(problem)
return problems
def get_problems(categories):
assert is_login()
problems = []
for c in categories:
problems.extend(_get_category_problems(c))
return sorted(problems, key=lambda p: p['id'])
def get_problem(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
'variables': {'titleSlug': slug},
'operationName': 'getQuestionDetail'}
log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
res = session.post(LC_GRAPHQL, json=body, headers=headers)
log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the problem: {}'.format(slug))
return None
q = res.json()['data']['question']
content = q['translatedContent'] or q['content']
if content is None:
_echoerr('cannot get the problem: {}'.format(slug))
return None
soup = BeautifulSoup(content, features='html.parser')
problem = {}
problem['id'] = q['questionId']
problem['title'] = q['title']
problem['slug'] = slug
problem['level'] = q['difficulty']
problem['desc'] = _break_paragraph_lines(soup.get_text())
problem['templates'] = {}
for t in json.loads(q['codeDefinition']):
problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
problem['testable'] = q['enableRunCode']
problem['testcase'] = q['sampleTestCase']
stats = json.loads(q['stats'])
problem['total_accepted'] = stats['totalAccepted']
problem['total_submission'] = stats['totalSubmission']
problem['ac_rate'] = stats['acRate']
return problem
def _split(s):
    # str.split has a disadvantage that ''.split('\n') results in [''], but what we want
# is []. This small function returns [] if `s` is a blank string, that is, containing no
# characters other than whitespaces.
if s.strip() == '':
return []
return s.split('\n')
def _check_result(submission_id):
global task_progress
if _in_task():
prog_stage = 'Uploading '
prog_bar = '.'
task_progress = prog_stage + prog_bar
while True:
headers = _make_headers()
url = LC_CHECK.format(submission=submission_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r = res.json()
if r['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r['state'] == 'PENDING':
prog_stage = 'Pending '
elif r['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
result = {
'answer': r.get('code_answer', []),
'runtime': r['status_runtime'],
'state': _status_to_name(r['status_code']),
'testcase': _split(r.get('input', r.get('last_testcase', ''))),
'passed': r.get('total_correct') or 0,
'total': r.get('total_testcases') or 0,
'error': [v for k, v in r.items() if 'error' in k and v]
}
# the keys differs between the result of testing the code and submitting it
# for submission judge_type is 'large', and for testing judge_type does not exist
if r.get('judge_type') == 'large':
result['answer'] = _split(r.get('code_output', ''))
result['expected_answer'] = _split(r.get('expected_output', ''))
result['stdout'] = _split(r.get('std_output', ''))
result['runtime_percentile'] = r.get('runtime_percentile', '')
else:
# Test states cannot distinguish accepted answers from wrong answers.
if result['state'] == 'Accepted':
result['state'] = 'Finished'
result['stdout'] = r.get('code_output', [])
result['expected_answer'] = []
result['runtime_percentile'] = r.get('runtime_percentile', '')
result['expected_answer'] = r.get('expected_code_answer', [])
return result
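def _is_accepted(result):
    # Illustrative helper (not part of the original plugin): only submission results
    # can report 'Accepted'; test runs are rewritten to 'Finished' in the branch above,
    # even when every case passes.
    return result is not None and result.get('state') == 'Accepted'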
def test_solution(problem_id, title, slug, filetype, code, test_input):
assert is_login()
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': test_input,
'lang': filetype,
'question_id': str(problem_id),
'test_mode': False,
'typed_code': code}
url = LC_TEST.format(slug=slug)
log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot test the solution for ' + slug)
return None
result = _check_result(res.json()['interpret_id'])
result['testcase'] = test_input.split('\n')
result['title'] = title
return result
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
code = _remove_description(code)
task_name = 'test_solution'
task_input = [problem_id, title, slug, filetype, code, test_input]
task_trigger.release()
return True
def submit_solution(slug, filetype, code=None):
assert is_login()
problem = get_problem(slug)
if not problem:
return None
if code is None:
code = '\n'.join(vim.current.buffer)
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': problem['testcase'],
'lang': filetype,
'question_id': str(problem['id']),
'test_mode': False,
'typed_code': code,
'judge_type': 'large'}
url = LC_SUBMIT.format(slug=slug)
log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot submit the solution for ' + slug)
return None
result = _check_result(res.json()['submission_id'])
result['title'] = problem['title']
return result
def submit_solution_async(slug, filetype, code=None):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
if code is None:
code = '\n'.join(vim.current.buffer)
task_name = 'submit_solution'
task_input = [slug, filetype, code]
task_trigger.release()
return True
def get_submissions(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
url = LC_SUBMISSIONS.format(slug=slug)
log.info('get submissions request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submissions of problem: ' + slug)
return None
submissions = []
for r in res.json()['submissions_dump']:
s = {
'id': r['url'].split('/')[3],
'time': r['time'].replace('\xa0', ' '),
'status': r['status_display'],
'runtime': r['runtime'],
}
submissions.append(s)
return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
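# Illustrative examples (not in the original source): the submission page embeds data as
# JavaScript string literals, so escapes such as \u003d need decoding back to plain text.
def _demo_unescape():
    assert _unescape('a \\u003d b') == 'a = b'
    assert _unescape('line1\\nline2') == 'line1\nline2'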
def get_submission(sid):
assert is_login()
headers = _make_headers()
url = LC_SUBMISSION.format(submission=sid)
log.info('get submission request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submission: ' + sid)
return None
# we need to parse the data from the Javascript snippet
s = res.text
+ fns)
rejection_fraction = tns / (tns + fps)
fprs = fps / (fps + tns)
return rejection_fraction, tprs, fprs, thresholds
def disp_multiple_learn_hist(locations,losslim=None,show=True,titles=None,best_only=False,leg_font=10,title_font=10):
'''
Plots a grid of learning histories.
Args:
locations ... list of paths to directories of training dumps
losslim ... limit of loss axis
show ... bool, whether to show the plot
titles ... list of titles for each plot in the grid
best_only ... bool, whether to plot only the points where best model was saved
        leg_font            ... legend font size
        title_font          ... title font size
author: <NAME>
June 2020
'''
ncols = len(locations) if len(locations) < 3 else 3
nrows = math.ceil(len(locations)/3)
if nrows==1 and ncols==1: fig = plt.figure(facecolor='w',figsize=(12,12))
else: fig = plt.figure(facecolor='w',figsize=(12,nrows*4))
gs = gridspec.GridSpec(nrows,ncols,figure=fig)
axes = []
for i,location in enumerate(locations):
train_log=location+'/log_train.csv'
val_log=location+'/log_val.csv'
train_log_csv = pd.read_csv(train_log)
val_log_csv = pd.read_csv(val_log)
if best_only:
best_idxs = [0]
best_epoch=0
best_loss = val_log_csv.loss[0]
for idx,loss in enumerate(val_log_csv.loss):
if loss < best_loss:
best_loss=loss
best_idxs.append(idx)
best_epoch=val_log_csv.epoch[idx]
val_log_csv = val_log_csv.loc[best_idxs]
if titles is not None:
titles[i] = titles[i] + ", Best Val Loss ={loss:.4f}@Ep.{epoch:.2f}".format(loss=best_loss,epoch=best_epoch)
ax1=fig.add_subplot(gs[i],facecolor='w') if i ==0 else fig.add_subplot(gs[i],facecolor='w',sharey=axes[0])
ax1.set_xlim(0,train_log_csv.epoch.max())
axes.append(ax1)
line11 = ax1.plot(train_log_csv.epoch, train_log_csv.loss, linewidth=2, label='Train loss', color='b', alpha=0.3)
line12 = ax1.plot(val_log_csv.epoch, val_log_csv.loss, marker='o', markersize=3, linestyle='', label='Validation loss', color='blue')
if losslim is not None:
ax1.set_ylim(None,losslim)
if titles is not None:
ax1.set_title(titles[i],size=title_font)
ax2 = ax1.twinx()
line21 = ax2.plot(train_log_csv.epoch, train_log_csv.accuracy, linewidth=2, label='Train accuracy', color='r', alpha=0.3)
line22 = ax2.plot(val_log_csv.epoch, val_log_csv.accuracy, marker='o', markersize=3, linestyle='', label='Validation accuracy', color='red')
ax1.set_xlabel('Epoch',fontweight='bold',fontsize=24,color='black')
ax1.tick_params('x',colors='black',labelsize=18)
ax1.set_ylabel('Loss', fontsize=24, fontweight='bold',color='b')
ax1.tick_params('y',colors='b',labelsize=18)
ax2.set_ylabel('Accuracy', fontsize=24, fontweight='bold',color='r')
ax2.tick_params('y',colors='r',labelsize=18)
ax2.set_ylim(0.,1.05)
lines = line11 + line12 + line21 + line22
labels = [l.get_label() for l in lines]
leg = ax2.legend(lines, labels, fontsize=16, loc=5, numpoints=1,prop={'size':leg_font})
leg_frame = leg.get_frame()
leg_frame.set_facecolor('white')
gs.tight_layout(fig)
return fig
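def _demo_learn_hist(dump_dirs=('dumps/run1', 'dumps/run2')):
    # Hypothetical usage sketch (the directories and titles are placeholders, not real
    # training dumps); each directory must contain log_train.csv and log_val.csv with
    # 'epoch', 'loss' and 'accuracy' columns, as read by disp_multiple_learn_hist above.
    fig = disp_multiple_learn_hist(list(dump_dirs), losslim=2.0,
                                   titles=['run 1', 'run 2'], best_only=True)
    fig.savefig('learn_hist_grid.png')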
# Function to plot a grid of confusion matrices
def plot_multiple_confusion_matrix(label_arrays, prediction_arrays, class_names,titles=None):
"""
plot_multiple_confusion_matrix(label_arrays, prediction_arrays, class_names,titles=None)
Purpose : Plot the confusion matrix for a series of test outputs.
    Args: label_arrays      ... list of 1D arrays of true label values, one array per test run
          prediction_arrays ... list of 1D arrays of predicted labels, one array per test run
          class_names       ... 1D array of string labels for the classification targets, length = number of categories
          titles            ... optional list of titles, one per confusion matrix
author: <NAME>
May 2020
"""
fig = plt.figure(facecolor='w',figsize=(16,8))
gs = gridspec.GridSpec(math.ceil(len(label_arrays)/3),3,figure=fig)
axes = []
for i,labels in enumerate(label_arrays):
predictions = prediction_arrays[i]
ax=fig.add_subplot(gs[i],facecolor='w')
num_labels = len(class_names)
        max_value = np.max([np.max(np.unique(labels)), np.max(np.unique(predictions))])
assert max_value < num_labels
mat,_,_,im = ax.hist2d(predictions, labels,
bins=(num_labels,num_labels),
range=((-0.5,num_labels-0.5),(-0.5,num_labels-0.5)),cmap=plt.cm.Blues)
# Normalize the confusion matrix
mat = mat.astype("float") / mat.sum(axis=0)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.tick_params(labelsize=20)
ax.set_xticks(np.arange(num_labels))
ax.set_yticks(np.arange(num_labels))
ax.set_xticklabels(class_names,fontsize=20)
ax.set_yticklabels(class_names,fontsize=20)
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
plt.setp(ax.get_yticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
ax.set_xlabel('Prediction',fontsize=20)
ax.set_ylabel('True Label',fontsize=20)
if titles is not None:
ax.set_title(titles[i])
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
ax.text(i,j, r"${0:0.3f}$".format(mat[i,j]),
ha="center", va="center", fontsize=20,
color="white" if mat[i,j] > (0.5*mat.max()) else "black")
gs.tight_layout(fig)
return fig
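def _demo_confusion_grid():
    # Minimal hypothetical call (labels, predictions and class names are made up);
    # real use passes one array per test run, as loaded by load_test_output below.
    labels = [np.array([0, 1, 2, 1, 0]), np.array([2, 2, 0, 1, 1])]
    preds = [np.array([0, 1, 1, 1, 0]), np.array([2, 0, 0, 1, 2])]
    return plot_multiple_confusion_matrix(labels, preds,
                                          class_names=['class 0', 'class 1', 'class 2'],
                                          titles=['run A', 'run B'])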
def load_test_output(location,index_path,remove_flagged=True):
"""
load_test_output(location,index_path)
Purpose : Load output of a test run on the full h5 test set,
remove FiTQun flagged/failed events, and return a dict of results.
Args: location ... string, path of the directory containing the test
output eg. '/home/cmacdonald/CNN/dumps/20200525_152544/test_validation_iteration_dump.npz'
index_path ... string, path of directory containing indices of FiTQun failed and flagged files
author: <NAME>
May 2020
"""
test_dump_np = np.load(location, allow_pickle=True)
res_predictedlabels = np.concatenate(list([batch_array for batch_array in test_dump_np['predicted_labels']]))
res_softmaxes = np.concatenate(list([batch_array for batch_array in test_dump_np['softmax']]))
res_labels = np.concatenate(list([batch_array for batch_array in test_dump_np['labels']]))
res_energies = np.concatenate(list([batch_array for batch_array in test_dump_np['energies']]))
res_rootfiles = np.concatenate(list([batch_array for batch_array in test_dump_np['rootfiles']]))
res_eventids = np.concatenate(list([batch_array for batch_array in test_dump_np['eventids']]))
res_angles = np.concatenate(list([batch_array for batch_array in test_dump_np['angles']]))
failed_idxs = np.load(os.path.join(index_path, 'fq_failed_idxs.npz'),allow_pickle=True)['failed_indices_pointing_to_h5_test_set'].astype(int)
flagged_idxs = np.load(os.path.join(index_path, 'fq_flagged_idxs.npz'),allow_pickle=True)['arr_0'].astype(int)
sres_predictedlabels = np.delete(res_predictedlabels,failed_idxs)
sres_softmaxes = np.delete(res_softmaxes,failed_idxs,0)
sres_labels = np.delete(res_labels,failed_idxs)
sres_energies = np.delete(res_energies,failed_idxs)
sres_rootfiles = np.delete(res_rootfiles,failed_idxs)
sres_eventids = np.delete(res_eventids,failed_idxs)
sres_angles = np.delete(res_angles,failed_idxs,0)
if remove_flagged:
filtered_res_predictedlabels = np.delete(sres_predictedlabels,flagged_idxs)
filtered_res_softmaxes = np.delete(sres_softmaxes,flagged_idxs,0)
filtered_res_labels = np.delete(sres_labels,flagged_idxs)
filtered_res_energies = np.delete(sres_energies,flagged_idxs)
filtered_res_rootfiles = np.delete(sres_rootfiles,flagged_idxs)
filtered_res_eventids = np.delete(sres_eventids,flagged_idxs)
filtered_res_angles = np.delete(sres_angles,flagged_idxs,0)
return{'filtered_predictions':filtered_res_predictedlabels,
'filtered_softmaxes':filtered_res_softmaxes,
'filtered_labels':filtered_res_labels,
'filtered_energies':filtered_res_energies,
'filtered_rootfiles':filtered_res_rootfiles,
'filtered_eventids':filtered_res_eventids,
'filtered_angles':filtered_res_angles
}
else:
return{'s_predictions':sres_predictedlabels,
's_softmaxes':sres_softmaxes,
's_labels':sres_labels,
's_energies':sres_energies,
's_rootfiles':sres_rootfiles,
's_eventids':sres_eventids,
's_angles':sres_angles
}
def parametrized_ray_point(x,y,z,theta,phi,t):
'''
parametrized_ray_point(x,y,z,theta,phi,t)
Purpose: Find the point of a line departing (x,y,z) in direction specified by (theta,phi) and parametrized by t at
given value of t.
Args: x, y, z ... origin co-ordinates of line
theta, phi ... polar and azimuthal angles of departure
t ... parameter giving desired point
author: <NAME>
May 2020
'''
return x + np.sin(theta)*np.cos(phi)*t,y + np.sin(theta)*np.sin(phi)*t, z + np.cos(theta)*t
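def _demo_ray_point():
    # Quick sanity checks (illustrative, not from the original source): t = 0 returns
    # the origin, and theta = pi/2, phi = 0 moves the point along +x only.
    assert parametrized_ray_point(1., 2., 3., 0.7, 0.3, 0.) == (1., 2., 3.)
    x, y, z = parametrized_ray_point(0., 0., 0., np.pi / 2, 0., 5.)
    assert abs(x - 5.) < 1e-9 and abs(y) < 1e-9 and abs(z) < 1e-9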
def distance_to_wall(position, angle):
"""
distance_to_wall(position, angle)
Purpose : Calculate distance from event origin to IWCD wall along particle trajectory.
Args: position ... array of [x, y, z] co-ordinates of event origin
angle ... array of [theta, phi] angles of departure
author: <NAME>
May 2020
"""
x = position[0]
y = position[2]
z = position[1]
theta = angle[0]
phi = angle[1]
no_radial=False
sols = []
#Solve for intersections of parametrized path with the cylinder and caps, keep only positive parameter solns
try:
shared_expression = np.sqrt(-np.sin(theta)**2*(-275282+(x**2 + y**2)
+ (y**2 - x**2)*np.cos(2*phi)-2*x*y*np.sin(2*phi)))/(np.sin(theta)*np.sqrt(2))
except:
no_radial=True
if not no_radial:
try:
radial_parameter_sol_1 = -1/np.sin(theta)*(x*np.cos(phi)+y*np.sin(phi)
+shared_expression)
if radial_parameter_sol_1 > 0: sols.append(radial_parameter_sol_1)
except:
pass
try:
radial_parameter_sol_2 = 1/np.sin(theta)*(-x*np.cos(phi)-y*np.sin(phi)
+shared_expression)
if radial_parameter_sol_2 > 0: sols.append(radial_parameter_sol_2)
except:
pass
try:
cap_parameter_sol_top = (521 - z)/np.cos(theta)
cap_parameter_sol_bottom = -(521+z)/np.cos(theta)
if cap_parameter_sol_top > 0: sols.append(cap_parameter_sol_top)
if cap_parameter_sol_bottom > 0: sols.append(cap_parameter_sol_bottom)
except:
pass
sols = np.sort(sols)
x_int,y_int,z_int = parametrized_ray_point(x,y,z,theta,phi,sols[0])
return np.sqrt((x-x_int)**2+(y-y_int)**2+(z-z_int)**2)
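def _demo_distance_to_wall():
    # Illustrative check (not from the original source). The constants above appear to
    # encode a cylinder of radius sqrt(275282 / 2) = 371 and half-height 521 (units as
    # used elsewhere in this analysis, presumably cm): a horizontal track (theta = pi/2)
    # from the centre should therefore hit the barrel wall at ~371.
    d = distance_to_wall([0., 0., 0.], [np.pi / 2, 0.])
    assert abs(d - np.sqrt(275282. / 2.)) < 1e-6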
def plot_compare_dists(dists,dist_idxs_to_compare,dist_idxs_reference,
labels,axes=None,colors=None,bins=20,
title=None, ratio_range=None,xlabel=None,
linestyle=None):
'''
Plot distributions and plot their ratio.
Args:
dists ... list of 1d arrays
dist_idxs_to_compare ... list of indices of distributions to use as numerator
in the ratio plot
dist_idxs_reference ... list of indices of distributions to use as denominator
in the ratio plot
labels ... list of labels for each distribution
axes ... optional, list of two matplotlib.pyplot.axes on which
to place the plots
colors ... list of colors to use for each distribution
bins ... number of bins to use in histogram
title ... plot title
        ratio_range         ... y-axis limits for the ratio panel
xlabel ... x-axis label
linestyle ... list of linestyles to use for each distribution
author: <NAME>
June 2020
'''
ret = False
if axes is None:
fig, axes = plt.subplots(2,1,figsize=(12,12))
ret = True
    axes = np.ravel(axes)  # accept either an array of axes or a plain list
ax = axes[0]
ns, bins, patches = ax.hist(dists, weights=[np.ones(len(dists[i]))*1/len(dists[i]) for i in range(len(dists))],
label=labels,histtype=u'step',bins=bins,color=colors,alpha=0.8)
if linestyle is not None:
for i,patch_list in enumerate(patches):
for patch in patch_list:
patch.set_linestyle(linestyle[i])
ax.legend()
if title is not None: ax.set_title(title)
ax2 = axes[1]
for i,idx in enumerate(dist_idxs_to_compare):
lines = ax2.plot(bins[:-1],
ns[idx] / ns[dist_idxs_reference[i]],
alpha=0.8,label='{} to {}'.format(labels[idx],labels[dist_idxs_reference[i]]))
lines[0].set_color(patches[idx][0].get_edgecolor())
lines[0].set_drawstyle('steps')
if ratio_range is not None: ax2.set_ylim(ratio_range)
ax2.legend()
ax2.set_title('Ratio of Distributions')
lines = ax2.plot(bins[:-1],np.ones(len(bins)-1),color='k',alpha=0.5)
lines[0].set_linestyle('-.')
if xlabel is not None:
ax.set_xlabel(xlabel)
ax2.set_xlabel(xlabel)
if ret: return fig
def plot_2d_ratio(dist_1_x,dist_1_y,dist_2_x, dist_2_y,bins=(150,150),fig=None,ax=None,
title=None, xlabel=None, ylabel=None, ratio_range=None):
'''
Plots the 2d ratio between the 2d histograms of two distributions.
Args:
dist_1_x: ... 1d array of x-values of distribution 1 of length n
dist_1_y: ... 1d array of y-values of distribution 1 of length n
dist_2_x: ... 1d array of x-values of distribution 2 of length n
dist_2_y: ... 1d array of y-values of distribution 2 of length n
        bins:     ... tuple of integer numbers of bins in x and y
        fig, ax:  ... optional matplotlib figure and axes to draw on
        ratio_range ... optional (vmin, vmax) limits for the colour scale
author: <NAME>
May 2020
'''
if ax is None: fig,ax = plt.subplots(1,1,figsize=(8,8))
bin_range = [[np.min([np.min(dist_1_x),np.min(dist_2_x)]),np.max([np.max(dist_1_x),np.max(dist_2_x)])],
[np.min([np.min(dist_1_y),np.min(dist_2_y)]),np.max([np.max(dist_1_y),np.max(dist_2_y)])]]
ns_1, xedges, yedges = np.histogram2d(dist_1_x,dist_1_y,bins=bins,density=True,range=bin_range)
ns_2,_,_ = np.histogram2d(dist_2_x,dist_2_y,bins=bins,density=True,range=bin_range)
ratio = ns_1/ns_2
ratio = np.where((ns_2==0) & (ns_1==0),1,ratio)
ratio = np.where((ns_2==0) & (ns_1!=0),10,ratio)
    vmin, vmax = ratio_range if ratio_range is not None else (None, None)
    pc = ax.pcolormesh(xedges, yedges, np.swapaxes(ratio,0,1),vmin=vmin,vmax=vmax,cmap="RdBu_r")
fig.colorbar(pc, ax=ax)
if title is not None: ax.set_title(title)
if xlabel is not None: ax.set_xlabel(xlabel)
if ylabel is not None: ax.set_ylabel(ylabel)
return fig
def binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
'''
SOURCE: Scikit.metrics internal usage tool
'''
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int or str, default=None
        The label of
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import math
import warnings
import inspect
from functools import partial
import tensorflow as tf
from trident.backend.common import TensorShape
from trident.backend.tensorflow_backend import *
from trident.backend.tensorflow_ops import *
from trident.backend.common import get_function, camel2snake
__all__ = ['kaiming_uniform', 'kaiming_normal','xavier_uniform','xavier_normal','trunc_normal','fill_zeros','fill_ones']
def calculate_gain(nonlinearity, param=None):
r"""Return the recommended gain value for the given nonlinearity function.
The values are as follows:
================= ====================================================
nonlinearity gain
================= ====================================================
Linear / Identity :math:`1`
Conv{1,2,3}D :math:`1`
Sigmoid :math:`1`
Tanh :math:`\frac{5}{3}`
ReLU :math:`\sqrt{2}`
Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
================= ====================================================
Args:
nonlinearity: the non-linear function (`nn.functional` name)
param: optional parameter for the non-linear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
    elif not isinstance(param, bool) and (isinstance(param, int) or isinstance(param, float)):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def _calculate_fan_in_and_fan_out(tensor):
dimensions = len(tensor.shape)
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
num_input_fmaps = int_shape(tensor)[-1]
num_output_fmaps = int_shape(tensor)[0]
receptive_field_size = 1
if dimensions > 2:
receptive_field_size = tensor[0][0].numel()
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def uniform(tensor, a=0., b=1.):
# type: (Tensor, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the lower bound of the uniform distribution
b: the upper bound of the uniform distribution
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.uniform_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
weight.assign(random_uniform_like(weight, a=a,b=b))
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
tensor.assign(random_uniform_like(tensor, a=a,b=b))
def normal(tensor, mean=0., std=1.):
# type: (Tensor, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.normal_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
weight.assign(random_normal_like(weight,mean=mean,std=std))
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
tensor.assign(random_normal_like(tensor,mean=mean,std=std))
def fill_zeros(tensor):
# type: (Tensor) -> Tensor
r"""Fills the input Tensor with the scalar value `0`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.zeros_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable :
weight.assign(zeros_like(weight))
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
tensor.assign(zeros_like(tensor))
def fill_ones(tensor):
# type: (Tensor) -> Tensor
r"""Fills the input Tensor with the scalar value `1`.
Args:
tensor: an n-dimensional `torch.Tensor`
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.ones_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
weight.assign(ones_like(weight))
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
tensor.assign(ones_like(tensor))
def kaiming_uniform(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
Examples:
>>> w = zeros((3, 5))
>>> kaiming_uniform(w, mode='fan_in', nonlinearity='relu')
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
kaiming_uniform(weight, a, mode, nonlinearity)
elif isinstance(tensor, tf.Variable) and tensor.trainable == True:
tensor_data = tensor.value()
fan = to_numpy(_calculate_correct_fan(tensor_data, mode)).mean()
gain = calculate_gain(nonlinearity, a)
std = true_divide(gain, math.sqrt(fan))
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
tensor.assign(random_uniform_like(tensor_data, -bound, bound, tensor_data.dtype))
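def _expected_kaiming_uniform_bound(fan, a=0., nonlinearity='leaky_relu'):
    # Illustrative helper (not part of trident): reproduces the bound used above,
    # bound = gain * sqrt(3 / fan). For example, fan=300 with 'relu' gives
    # sqrt(2) * sqrt(3 / 300) ~= 0.141.
    gain = calculate_gain(nonlinearity, a)
    return math.sqrt(3.0) * gain / math.sqrt(fan)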
def kaiming_normal(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - <NAME>. et al. (2015), using a
normal distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
Also known as He initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
"""
if isinstance(tensor, tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
kaiming_normal(weight, a, mode, nonlinearity)
elif isinstance(tensor, tf.Variable) and tensor.trainable == True:
tensor_data=tensor.value()
fan = to_numpy(_calculate_correct_fan(tensor_data, mode)).mean()
gain = calculate_gain(nonlinearity, a)
std = true_divide(gain , math.sqrt(fan))
tensor.assign(random_normal_like(tensor_data,0, std, tensor_data.dtype))
def xavier_uniform(tensor, gain=1.):
# type: (Tensor, float) -> Tensor
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - <NAME>. & <NAME>. (2010), using a uniform
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
xavier_uniform(weight, gain)
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
tensor.assign(random_uniform_like(tensor, -a, a))
def xavier_normal(tensor, gain=1.):
# type: (Tensor, float) -> Tensor
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
neural networks` - <NAME>. & <NAME>. (2010), using a normal
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
Args:
tensor: an n-dimensional `torch.Tensor`
gain: an optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.xavier_normal_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
xavier_normal(weight, gain)
elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
tensor.assign(random_normal_like(tensor, 0, std))
def trunc_normal(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
if isinstance(tensor,tf.Module):
for name,weight in tensor.named_parameters():
if weight.trainable==True and 'bias' not in name:
                # tf.random.truncated_normal takes `stddev` (no std/a/b keywords) and always
                # resamples values beyond two standard deviations, which matches the default
                # cutoffs a=-2, b=2 only when mean=0 and std=1; custom cutoffs are not honoured.
                weight.assign(tf.random.truncated_normal(weight.shape, mean=mean, stddev=std))
    elif isinstance(tensor, tf.Variable) and tensor.trainable==True:
        tensor.assign(tf.random.truncated_normal(tensor.shape, mean=mean, stddev=std))
def get_initializer(initializer,**kwargs):
if isinstance(initializer,str):
initializer_fn = get_function(camel2snake(initializer), ['trident.backend.tensorflow_initializers'])
initializer_fn=partial(initializer_fn,**kwargs) if len(kwargs)>0 else initializer_fn
return initializer_fn
    elif
<reponame>jjisnow/pytube<gh_stars>0
""" Downloader of video website links.
Will download video and mux with audio, or audio only, or video with audio
already. It will also download and retime caption subtitles using pysrt.
Usage:
downloader.py [URL...] [--verbose | --quiet] [--itag value] [--lang string]
[--list] [--duration HH:MM:SS.milliseconds] [--start HH:MM:SS.milliseconds]
Arguments:
URL individual websites to download video from
Options:
-h, --help Show this screen
-v, --verbose Show verbose output
-q, --quiet Run quietly
-i, --itag value The stream to download
--lang string The caption language to download [default: English]
-l, --list List streams and exit
-d, --duration t Download [[HH:]MM:]SS[.milliseconds] of clip
-s, --start s Start download at [[HH:]MM:]SS[.milliseconds]
"""
import datetime
import math
import os
import shutil
import sys
from functools import wraps
from pprint import pformat
import time
import pysrt
from pytube import YouTube
import subprocess
from pathlib import Path
import logging
from docopt import docopt
from tabulate import tabulate
from typing import Union
def timing(fn):
'''Timing decorator for program - calculates runtime'''
@wraps(fn)
def wrap(*args, **kw):
time_start = time.time()
result = fn(*args, **kw)
time_end = time.time()
run_secs = time_end - time_start
date_secs = datetime.datetime.utcfromtimestamp(run_secs)
run_time = date_secs.time()
logging.info(f'function:{fn.__name__} args:[{args}, {kw}]')
logging.info(f' --- {run_time.isoformat(timespec="milliseconds")} secs --- ')
return result
return wrap
@timing
def downloader(*args: tuple):
''' main interface for downloader file
'''
arguments = parse_arguments(args)
config_loggers(arguments)
arguments = check_url(arguments)
check_requirements('aria2c', 'ffmpeg')
for file in arguments['URL']:
logging.debug(f"Parsing url: {file}")
yt = YouTube(file)
streams = yt.streams
stream_table = parse_streams(streams)
if arguments['--list']:
return stream_table
itag = get_itag(arguments)
target_stream = streams.get_by_itag(itag)
logging.info("DOWNLOADING:")
video_path, audio_path, subtitle_path, video_fps = [None] * 4
if not target_stream.includes_audio_track:
logging.info("downloading video first......")
video_path = download_file(target_stream,
args=arguments,
)
video_fps = target_stream.fps
logging.info("downloading audio as well!")
audio_target = streams.filter(only_audio=True).first()
audio_path = download_file(audio_target,
args=arguments,
)
else:
logging.info(f"downloading {target_stream.type} ONLY")
if target_stream.type == 'video':
video_path = download_file(target_stream,
args=arguments,
)
video_fps = target_stream.fps
elif target_stream.type == 'audio':
audio_target = target_stream
audio_path = download_file(audio_target,
args=arguments,
)
else:
logging.critical(
f"unexpected file type: {target_stream.type}")
return 1
# need to retime the captions if I'm to use them in shorter videos
if not target_stream.type == 'audio':
subtitle_path = download_captions(yt, lang=arguments['--lang'],
duration=arguments['--duration'],
start=arguments['--start'])
# In the event only audio, create HQ mp3 or aac file
if target_stream.type == 'audio':
if (audio_path.suffix == '.webm' and target_stream.audio_codec == 'opus') \
or (
audio_path.suffix == '.mp4' and 'mp4' in target_stream.audio_codec):
final_path = make_mp3(audio_path) # the default
# final_fp = make_aac(audio_path) # not supported by all platforms
# final_fp = make_ogg(audio_path) # not supported by all platforms
else:
final_path = mux_files(audio_path)
else:
final_path = mux_files(audio_path, video_path, subtitle_path, video_fps)
cleanup_files(audio_path, video_path, subtitle_path)
logging.info(f"Final output file: {final_path}")
return final_path
def parse_arguments(args: tuple) -> dict:
'''set arguments dictionary from supplied arguments'''
arguments = docopt(__doc__, argv=args, help=True)
if arguments['--verbose']:
log_level = logging.DEBUG
elif arguments['--quiet']:
log_level = logging.CRITICAL
else:
log_level = logging.INFO
arguments['log_level'] = log_level
return arguments
def config_loggers(args: dict) -> None:
""" displays the supplied arguments to stdout before switching back to
the stderr handler
:param args:
:param log_level:
:return:
"""
log_level = args['log_level']
logging.basicConfig(level=log_level)
logger = logging.getLogger()
# These lines are needed to create a stdout handler
# stdout_handler = logging.StreamHandler(stream=sys.stdout)
# stdout_handler.setLevel(log_level)
# logger.addHandler(stdout_handler)
#
# root_handler = logger.handlers[0]
# root_handler.setLevel(log_level)
# logger.removeHandler(root_handler)
logging.info(f"Supplied args: \n {args}")
# logger.removeHandler(stdout_handler)
# logger.addHandler(root_handler)
def check_url(args: dict) -> dict:
    '''Ensure at least one URL is supplied, prompting the user for a link
    until a non-empty one is provided.
    '''
while len(args['URL']) == 0:
link = input("Provide a youtube link to download: ")
args['URL'].append(link)
if args['URL'][0] == '':
print("a link must be supplied!")
del args['URL'][0]
logging.info(f"Final args: {args}")
return args
def check_requirements(*args) -> None:
'''ensure executables supplied exist on the file system'''
logging.debug(f'Requirements: {args}')
for arg in args:
status = shutil.which(f'{arg}')
if status is not None:
logging.debug(f'Requirement: {arg} met with {status}')
else:
logging.error(f'Requirement: {arg} not met! status: {status}')
raise Exception(f'Requirement: {arg} not met! status: {status}')
def parse_streams(streams) -> str:
'''
take yt.streams.all() and print it as a table for viewing
'''
final_list = []
for stream in streams:
stream = str(stream).strip('<>').replace('Stream: ', '').split(' ')
stream_dict = {}
for item in stream:
a = item.split('=')
k = a[0]
v = a[1].strip('"')
stream_dict[k] = v
final_list.append(stream_dict)
stream_table = tabulate(final_list, headers="keys")
print(stream_table)
return stream_table
def get_itag(args: dict) -> int:
while True:
if args['--itag']:
itag = args['--itag']
break
try:
itag = int(input("Which stream do you want? (specify itag): "))
break
except ValueError:
logging.error("you need to provide a number!")
return itag
def download_file(download_target, args: dict = (), ) -> Path:
'''download stream given a download_target (a stream object either audio or video,
captions are handled separately).
args['duration'] and args['start'] are used to specify time dimensions
Note that ffmpeg already has a HH:MM:SS.ms specification limited to 2 digits for
HH, MM and SS
'''
logging.debug(f"current directory: {Path.cwd()}")
logging.info(f"Downloading itag: {download_target.itag}")
logging.info(f"Download url: {download_target.url}")
download_path = Path(download_target.default_filename)
# set local defaults for arguments passed
args = dict(args)
duration = args.get('--duration', None)
start = args.get('--start', '0')
download_path = Path(f"{download_path.stem}-{download_target.type}{download_path.suffix}")
logging.debug(f"Targeting destination: {download_path}")
if duration:
# download the file with ffmpeg
# -ss : start point to download in HH:MM:SS.MILLISECONDS format if needed
# -t : duration to download in seconds
# -to: end point to download as above format. -t takes precedence
# NB: -ss before -i sets the -to origin to zero at the cut point
    # -copyts: allows -to to refer to start of clip, not the cut point.
# removed individual codec copy encode commands because keyframes need
# to match downloaded time.
logging.debug(f"attempting to download {duration} seconds of file")
cmd = (f'ffmpeg',
'-y',
'-ss', f'{start}',
'-i', f'{download_target.url}',
'-t', f'{duration}',
'-c', 'copy',
f'{download_path}')
else:
# download the entire file with aria
# -c : continue/resume downloads
# -j : number of parallel downloads for 1 link
# --optimize-concurrent-downloads=true: optimise speed
# -x : max connections per server
# -k : min split size
# -s, --split=N: Download using N connections
cmd = ('aria2c',
'--continue=true',
'-j5', '-x5',
'--optimize-concurrent-downloads=true',
'-k', '1M',
'--split=5',
'-o', f'{download_path}',
f'{download_target.url}')
logging.debug(f"Command to be run: {cmd}")
subprocess.run(cmd, shell=False, check=True)
logging.info(f"Final {download_target.type} file: {download_path}")
return download_path
def download_captions(yt: YouTube, lang: str = 'English',
duration: str = None, start: str = None) -> Union[Path, None]:
i = None
caption_list = list(yt.captions.lang_code_index.values())
captions = enumerate(caption_list)
    captions_string = pformat(caption_list)
logging.debug(f'captions available: {captions_string}')
for index, c in captions:
logging.debug(f'{index} index: {c} caption')
if lang in str(c):
i = index
logging.debug(f'found {lang} captions at index {i}')
break
if i is None:
logging.debug(f'No {lang} Captions found!')
return None
subt_base = Path(yt.fmt_streams[0].default_filename).stem
subt_fp = Path(f'{subt_base}-captions.srt')
if os.path.exists(subt_fp):
logging.info(f'File {subt_fp} exists already!! Deleting')
os.remove(subt_fp)
logging.debug(f'Writing {subt_fp}')
lines = yt.caption_tracks[i].generate_srt_captions()
subt_fp.write_text(lines, encoding='utf-8')
# retime the subtitles
if start or duration:
logging.info(f'retiming subtitles {subt_fp}')
subs = pysrt.open(subt_fp)
if start:
start = float(strp_time(start))
subs.shift(seconds=-math.trunc(start),
milliseconds=-math.trunc((start % 1) * 1000))
part = subs.slice(starts_after={'milliseconds': -1})
if duration:
duration = float(strp_time(duration))
part = part.slice(ends_before={'seconds' : math.trunc(duration),
'milliseconds': math.trunc(
(duration % 1) * 1000)})
if len(part) < 1:
logging.info(f'No valid subtitles left, removing {subt_fp} file')
os.remove(subt_fp)
return None
part.save(subt_fp)
return subt_fp
def strp_time(time_str: str) -> str:
    '''Convert an [[HH:]MM:]SS[.milliseconds] string to total seconds, returned as a string.
    Plain second values without ':' pass through unchanged.'''
if ':' not in time_str:
return time_str
else:
secs = 0
time_parts = time_str.split(':')
for i, n in enumerate(reversed(time_parts)):
secs += 60 ** i * float(n)
return str(secs)
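def _demo_strp_time():
    # Illustrative checks (not in the original script) of the time parsing above.
    assert strp_time('90') == '90'             # plain seconds pass through unchanged
    assert strp_time('01:30') == '90.0'        # 1 min 30 s
    assert strp_time('1:01:05.5') == '3665.5'  # 1 h 1 min 5.5 s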
def mux_files(audio_path: Path, video_path: Path = None,
subt_path: Path = None, video_fps: str = None) -> Path:
'''mux file streams supplied'''
logging.info("attempting to mix audio and video")
# -y: global ie overwrite without asking
# -i: input file
# -r: set frame rate in fps
# -filter:a create filtergraph
# -c:a copy means copy audio streams
# -c:v copy means copy video stream codec
# -c:s srt means copy subtitles as srt
# -filter:a aresample=async=1 means resample audio to fit frame rates
if video_path:
# removes "-video" from name end
if video_path.stem.endswith('-video'):
final_path = Path(video_path.stem[:-6]).with_suffix(video_path.suffix)
elif audio_path:
# leaves "-audio" on end for only audio files
final_path = audio_path
else:
logging.error("no audio or video file path supplied")
# Using '.mkv' to handle subtitles for time being
final_path = Path(f'{final_path.stem}-output.mkv')
audio_path_text = ('-i', f'{audio_path}') if audio_path else ()
    video_path_text = ('-i', f'{video_path}')
<filename>tests/test_travel.py
from .helpers import IFPTestCase
from intficpy.exceptions import IFPError
from intficpy.room import Room
from intficpy.things import Container, Surface, Lock
from intficpy.travel import (
travelN,
travelNE,
travelE,
travelSE,
travelS,
travelSW,
travelW,
travelNW,
travelU,
travelD,
travelIn,
travelOut,
TravelConnector,
DoorConnector,
LadderConnector,
StaircaseConnector,
)
class TestDirectionTravel(IFPTestCase):
def test_cannot_travel_if_not_connection(self):
room1 = Room(self.game, "A place", "Description of a place. ")
self.assertTrue(
(
not room1.north
and not room1.northeast
and not room1.east
and not room1.southeast
and not room1.south
and not room1.southwest
and not room1.west
and not room1.northwest
and not room1.up
and not room1.down
and not room1.entrance
and not room1.exit
),
"This test needs room1 to have no directional connections.",
)
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.game.turnMain("n")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.n_false_msg)
self.game.turnMain("ne")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.ne_false_msg)
self.game.turnMain("e")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.e_false_msg)
self.game.turnMain("se")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.se_false_msg)
self.game.turnMain("s")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.s_false_msg)
self.game.turnMain("sw")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.sw_false_msg)
self.game.turnMain("w")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.w_false_msg)
self.game.turnMain("nw")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.nw_false_msg)
self.game.turnMain("u")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.u_false_msg)
self.game.turnMain("d")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.d_false_msg)
self.game.turnMain("in")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.in_false_msg)
self.game.turnMain("out")
self.assertIs(self.me.location, room1)
self.assertEqual(self.app.print_stack.pop(), room1.out_false_msg)
def test_travel_north_south(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.north = room2
room2.south = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("go n")
self.assertEqual(self.app.print_stack[-3], room1.n_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel north to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("s")
self.assertEqual(self.app.print_stack[-3], room1.s_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel south to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_travel_northeast_southwest(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.northeast = room2
room2.southwest = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("go northeast")
self.assertEqual(self.app.print_stack[-3], room1.ne_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel northeast to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("southwest")
self.assertEqual(self.app.print_stack[-3], room1.sw_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel southwest to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_travel_east_west(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.east = room2
room2.west = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("go e")
self.assertEqual(self.app.print_stack[-3], room1.e_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel east to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("w")
self.assertEqual(self.app.print_stack[-3], room1.w_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel west to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_travel_southeast_northwest(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.southeast = room2
room2.northwest = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("go southeast")
self.assertEqual(self.app.print_stack[-3], room1.se_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel southeast to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("go northwest")
self.assertEqual(self.app.print_stack[-3], room1.nw_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel northwest to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_travel_up_down(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.up = room2
room2.down = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("u")
self.assertEqual(self.app.print_stack[-3], room1.u_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel up to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("go d")
self.assertEqual(self.app.print_stack[-3], room1.d_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel down to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_travel_in_out(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
room1.entrance = room2
room2.exit = room1
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain("enter")
self.assertEqual(self.app.print_stack[-3], room1.in_msg)
self.assertIs(
self.me.location,
room2,
f"Tried to travel in to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain("exit")
self.assertEqual(self.app.print_stack[-3], room1.out_msg)
self.assertIs(
self.me.location,
room1,
f"Tried to travel out to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
class TestTravelConnectors(IFPTestCase):
def _assert_can_travel(self, room1, room2, command, return_command):
self.me.location.removeThing(self.me)
room1.addThing(self.me)
self.assertIs(
self.me.location, room1, "This test needs the user to start in room1"
)
self.game.turnMain(command)
self.assertIs(
self.me.location,
room2,
f"Tried to travel to {room2}, '{room2.name}', but player in "
f"{self.me.location}",
)
self.game.turnMain(return_command)
self.assertIs(
self.me.location,
room1,
f"Tried to travel to {room1}, '{room1.name}', but player in "
f"{self.me.location}",
)
def test_create_TravelConnector_with_invalid_direction(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
with self.assertRaises(IFPError):
c = TravelConnector(
self.game, self.start_room, "lllllllrrrrrkkkk", room2, "s"
)
def test_cannot_travel_blocked_connector(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = TravelConnector(self.game, self.start_room, "n", room2, "s")
c.can_pass = False
self.assertItemIn(
self.me, self.start_room.contains, "test needs player to start here"
)
self.assertIs(self.start_room.north, c)
self.game.turnMain("n")
self.assertIn(c.cannot_pass_msg, self.app.print_stack)
self.assertIs(self.me.location, self.start_room)
self.assertItemIn(
self.me, self.start_room.contains, "player should not have moved"
)
def test_cannot_travel_if_barrier_function_blocks(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = TravelConnector(self.game, self.start_room, "n", room2, "s")
c.barrierFunc = lambda g: True
self.assertItemIn(
self.me, self.start_room.contains, "test needs player to start here"
)
self.assertIs(self.start_room.north, c)
self.game.turnMain("n")
self.assertIs(self.me.location, self.start_room)
self.assertItemIn(
self.me, self.start_room.contains, "player should not have moved"
)
def test_cannot_travel_in_darkness(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = TravelConnector(self.game, self.start_room, "n", room2, "s")
self.start_room.dark = True
self.assertItemIn(
self.me, self.start_room.contains, "test needs player to start here"
)
self.assertIs(self.start_room.north, c)
self.game.turnMain("n")
self.assertIn("It's too dark to find your way. ", self.app.print_stack)
self.assertIs(self.me.location, self.start_room)
self.assertItemIn(
self.me, self.start_room.contains, "player should not have moved"
)
def test_can_travel_TravelConnector(self):
self.me.position = "sitting"
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = TravelConnector(self.game, room1, "n", room2, "s")
c.entrance_a_msg = "You creep north. "
self._assert_can_travel(room1, room2, "n", "s")
self.assertIn("You stand up. ", self.app.print_stack)
self.assertEqual(self.me.position, "standing")
self.assertIn(c.entrance_a_msg, self.app.print_stack)
self.assertIn("You go through the south doorway. ", self.app.print_stack)
self.assertIn(
room1.desc + "There is a doorway to the north. ", self.app.print_stack
)
def test_can_travel_DoorConnector(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = DoorConnector(self.game, room1, "n", room2, "s")
c.entrance_a.makeClosed()
self._assert_can_travel(room1, room2, "n", "s")
self.assertIn("You open the north door. ", self.app.print_stack)
self.assertIn("You go through the north door. ", self.app.print_stack)
self.assertIn("You go through the south door. ", self.app.print_stack)
self.assertIn(
room1.desc + "There is a door to the north. It is open. ",
self.app.print_stack,
)
def test_cannot_travel_closed_and_locked_door(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = DoorConnector(self.game, self.start_room, "n", room2, "s")
lock = Lock(self.game, is_locked=True, key_obj=None)
c.setLock(lock)
self.assertItemIn(
self.me, self.start_room.contains, "test needs player to start here"
)
self.assertIs(self.start_room.north, c)
self.game.turnMain("n")
self.assertIn(
f"{c.entrance_a.capNameArticle(True)} is locked. ", self.app.print_stack
)
self.assertIs(self.me.location, self.start_room)
self.assertItemIn(
self.me, self.start_room.contains, "player should not have moved"
)
def test_cannot_set_non_lock_as_door_lock(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = DoorConnector(self.game, self.start_room, "n", room2, "s")
lock = Surface(self.game, "lock?")
with self.assertRaises(IFPError):
c.setLock(lock)
def test_lock_already_attached_to_something_cannot_be_applied_to_a_door(self):
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = DoorConnector(self.game, self.start_room, "n", room2, "s")
lock = Lock(self.game, is_locked=True, key_obj=None)
c.setLock(lock)
c2 = DoorConnector(self.game, self.start_room, "e", room2, "w")
with self.assertRaises(IFPError):
c2.setLock(lock)
def test_can_travel_LadderConnector(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a different place. "
)
c = LadderConnector(self.game, room1, room2)
self._assert_can_travel(room1, room2, "u", "d")
self.assertIn("You climb up the upward ladder. ", self.app.print_stack)
self.assertIn("You climb down the downward ladder. ", self.app.print_stack)
self.assertIn(room1.desc + "A ladder leads up. ", self.app.print_stack)
def test_can_travel_StaircaseConnector(self):
room1 = Room(self.game, "A place", "Description of a place. ")
room2 = Room(
self.game, "A different place", "Description of a | |
+= 'Avg AcceptedTx PerSec,'
line += 'ClocksPerSec,'
msg_types = ['VERSION', 'VERACK', 'ADDR', 'INV', 'GETDATA', 'MERKLEBLOCK',
'GETBLOCKS', 'GETHEADERS', 'TX', 'HEADERS', 'BLOCK', 'GETADDR',
'MEMPOOL', 'PING', 'PONG', 'NOTFOUND', 'FILTERLOAD', 'FILTERADD',
'FILTERCLEAR', 'SENDHEADERS', 'FEEFILTER', 'SENDCMPCT', 'CMPCTBLOCK',
'GETBLOCKTXN', 'BLOCKTXN', 'REJECT', '[UNDOCUMENTED]']
for msg in msg_types:
    line += '# ' + msg + ' Msgs,'
    line += 'Num ' + msg + ' PerSec,'
    line += 'Avg ' + msg + ' PerSec,'
    line += 'ClocksAvg ' + msg + ','
    line += 'ClocksMax ' + msg + ','
    line += 'BytesAvg ' + msg + ','
    line += 'BytesMax ' + msg + ','
line += 'NumSkippedSamples,'
line += 'Power usage,'
line += 'Power usage unit,'
line += 'Power usage baseline,'
line += 'Power usage baseline unit,'
line += 'Hashrate,'
line += '\n'
line += 'Time in which the sample was taken (in human readable format),' # Timestamp
line += 'Time in which the sample was taken (in number of seconds since 1/01/1970),' # Timestamp (Seconds)
line += 'Number of peer connections,' # NumPeers
line += 'Sum of inbound connections (initiated by the peer),' # NumInbound
line += 'Sum of peers that do not have the full blockchain downloaded,' # NumPruned
line += 'Sum of peers that relay transactions,' # NumTXRelayers
line += 'Sum of peers that were connected through the "addnode" command,' # NumAddnode
line += 'Sum of peers that have a bloom filter,' # NumBloom
line += 'Average round trip time for ping to return a pong,' # AvgPingTime
line += 'Sum of all banscores (0 means genuine and 100 means ban),' # TotalBanScore
line += 'An array of all peer addresses separated by a space,' # Connections
line += 'Percentage of CPU usage of bitcoind for each core summed together (i.e. it may exceed 100%),' # CPU %
line += 'Percentage of memory usage of bitcoind,' # Memory %
line += 'Megabytes of memory being used,' # Mem
line += 'System bytes per second of the download and upload rates,' # Full System Bandwidth (download/upload)
line += 'System CPU usage percentage,' # Full System CPU %
#line += 'Contains the output from the "top" instruction for the bitcoind process,' # Resource usage
#line += 'A list of the current peer connections,' # GetPeerInfo
line += 'Block height of the node,' # BlockHeight
line += 'Amount of time it took for the block and header to reach you,' # BlockDelay
line += 'Number of unconfirmed transactions currently in the mempool,' # MempoolSize
line += 'Number of bytes that the mempool is currently taking up,' # MempoolBytes
line += 'Change in block height over the change in time for the most recent block update,' # AcceptedBlocksPerSec
line += 'Average of all previous AcceptedBlocksPerSec values,' # AcceptedBlocksPerSecAvg
line += 'Change in mempool size over the change in time for the most recent mempool size update,' # AcceptedTxPerSec
line += 'Average of all previous AcceptedTxPerSec values,' # AcceptedTxPerSecAvg
line += 'Number of clock cycles that occur each second within Bitcoin Core,' # ClocksPerSec
line += 'Total number of VERSION messages received thus far,' # # VERSION Msgs
line += 'Change in number of messages over the change in time for the most recent VERSION message received,' # Num VERSION PerSec
line += 'Average number of VERSION messages received per second thus far (the average of all previous Num VERSION PerSec values),' # Avg VERSION PerSec
line += 'Average number of clocks that it took to process this message,' # ClocksAvg VERSION
line += 'Maximum number of clocks that it took to process this message,' # ClocksMax VERSION
line += 'Average number of bytes that this message took up,' # BytesAvg VERSION
line += 'Maximum number of bytes that this message took up,' # BytesMax VERSION
line += 'Total number of VERACK messages received thus far,' # # VERACK Msgs
line += 'Change in number of messages over the change in time for the most recent VERACK message received,' # Num VERACK PerSec
line += 'Average number of VERACK messages received per second thus far (the average of all previous Num VERACK PerSec values),' # Avg VERACK PerSec
line += 'Average number of clocks that it took to process this message,' # ClocksAvg VERACK
line += 'Maximum number of clocks that it took to process this message,' # ClocksMax VERACK
line += 'Average number of bytes that this message took up,' # BytesAvg VERACK
line += 'Maximum number of bytes that this message took up,' # BytesMax VERACK
line += 'Total number of ADDR messages received thus far,' # # ADDR Msgs
line += 'Change in number of messages over the change in time for the most recent ADDR message received,' # Num ADDR PerSec
self.buttonSample.bind('<Button-1>', self.setSample)
self.buttonShowA.bind('<Button-1>', self.setFeatA)
self.buttonShowB.bind('<Button-1>', self.setFeatB)
self.buttonSaveDatasets.bind('<Button-1>', self.saveDataset)
self.buttonGetFeat.bind('<Button-1>', self.getFeat)
self.comboBoxTestType.bind('<<ComboboxSelected>>', self.setTest)
self.listFeatA.bind('<<ListboxSelect>>', self.selectValuesDatasetA)
self.listFeatB.bind('<<ListboxSelect>>', self.selectValuesDatasetB)
#self.listAttributes.bind('<<ListboxSelect>>', self.selectFocusFeatureValues)
'''
'''
TAB 2 - PREPROCESSOR
'''
self.labelFrameVariableDescriptor = LabelFrame(self.Tabs_t2)
self.labelFrameVariableDescriptor.place(relx=0.01, rely=0.0
, relheight=0.19, relwidth=0.98)
self.labelFrameVariableDescriptor.configure(relief=GROOVE)
self.labelFrameVariableDescriptor.configure(foreground="black")
self.labelFrameVariableDescriptor.configure(text='''Variable Description Generator (Not yet functional)''')
self.labelFrameVariableDescriptor.configure(background="#d9d9d9")
self.labelFrameVariableDescriptor.configure(width=980)
self.labelVariableFile = Label(self.labelFrameVariableDescriptor)
self.labelVariableFile.place(relx=0.01, rely=0.17, height=26, width=172)
self.labelVariableFile.configure(background="#d9d9d9")
self.labelVariableFile.configure(disabledforeground="#a3a3a3")
self.labelVariableFile.configure(foreground="#000000")
self.labelVariableFile.configure(text='''Variable File:''')
self.labelVariableFile.configure(width=172)
self.entryVariableFile = Entry(self.labelFrameVariableDescriptor)
self.entryVariableFile.place(relx=0.19, rely=0.17, relheight=0.21
, relwidth=0.64)
self.entryVariableFile.configure(background="white")
self.entryVariableFile.configure(disabledforeground="#a3a3a3")
self.entryVariableFile.configure(font="TkFixedFont")
self.entryVariableFile.configure(foreground="#000000")
self.entryVariableFile.configure(insertbackground="black")
self.entryVariableFile.configure(width=624)
self.buttonVariableFile = Button(self.labelFrameVariableDescriptor)
self.buttonVariableFile.place(relx=0.84, rely=0.17, height=23, width=146)
self.buttonVariableFile.configure(activebackground="#d9d9d9")
self.buttonVariableFile.configure(activeforeground="#000000")
self.buttonVariableFile.configure(background="#d9d9d9")
self.buttonVariableFile.configure(disabledforeground="#a3a3a3")
self.buttonVariableFile.configure(foreground="#000000")
self.buttonVariableFile.configure(highlightbackground="#d9d9d9")
self.buttonVariableFile.configure(highlightcolor="black")
self.buttonVariableFile.configure(pady="0")
self.buttonVariableFile.configure(text='''Choose File...''')
self.buttonVariableFile.configure(width=146)
self.labelValuesFile = Label(self.labelFrameVariableDescriptor)
self.labelValuesFile.place(relx=0.01, rely=0.43, height=26, width=172)
self.labelValuesFile.configure(background="#d9d9d9")
self.labelValuesFile.configure(disabledforeground="#a3a3a3")
self.labelValuesFile.configure(foreground="#000000")
self.labelValuesFile.configure(text='''Values File:''')
self.labelValuesFile.configure(width=172)
self.entryValuesFile = Entry(self.labelFrameVariableDescriptor)
self.entryValuesFile.place(relx=0.19, rely=0.43, relheight=0.21
, relwidth=0.64)
self.entryValuesFile.configure(background="white")
self.entryValuesFile.configure(disabledforeground="#a3a3a3")
self.entryValuesFile.configure(font="TkFixedFont")
self.entryValuesFile.configure(foreground="#000000")
self.entryValuesFile.configure(insertbackground="black")
self.entryValuesFile.configure(width=624)
self.buttonValuesFile = Button(self.labelFrameVariableDescriptor)
self.buttonValuesFile.place(relx=0.84, rely=0.43, height=23, width=146)
self.buttonValuesFile.configure(activebackground="#d9d9d9")
self.buttonValuesFile.configure(activeforeground="#000000")
self.buttonValuesFile.configure(background="#d9d9d9")
self.buttonValuesFile.configure(disabledforeground="#a3a3a3")
self.buttonValuesFile.configure(foreground="#000000")
self.buttonValuesFile.configure(highlightbackground="#d9d9d9")
self.buttonValuesFile.configure(highlightcolor="black")
self.buttonValuesFile.configure(pady="0")
self.buttonValuesFile.configure(text='''Choose File...''')
self.buttonValuesFile.configure(width=146)
self.buttonStartVariableDescriptor = Button(self.labelFrameVariableDescriptor)
self.buttonStartVariableDescriptor.place(relx=0.84, rely=0.7, height=23
, width=146)
self.buttonStartVariableDescriptor.configure(activebackground="#d9d9d9")
self.buttonStartVariableDescriptor.configure(activeforeground="#000000")
self.buttonStartVariableDescriptor.configure(background="#d9d9d9")
self.buttonStartVariableDescriptor.configure(disabledforeground="#a3a3a3")
self.buttonStartVariableDescriptor.configure(foreground="#000000")
self.buttonStartVariableDescriptor.configure(highlightbackground="#d9d9d9")
self.buttonStartVariableDescriptor.configure(highlightcolor="black")
self.buttonStartVariableDescriptor.configure(pady="0")
self.buttonStartVariableDescriptor.configure(text='''Start''')
self.buttonStartVariableDescriptor.configure(width=146)
self.entryInitialVarDesc = Entry(self.Tabs_t2)
self.entryInitialVarDesc.place(relx=0.19, rely=0.25, relheight=0.04
, relwidth=0.64)
self.entryInitialVarDesc.configure(background="white")
self.entryInitialVarDesc.configure(disabledforeground="#a3a3a3")
self.entryInitialVarDesc.configure(font="TkFixedFont")
self.entryInitialVarDesc.configure(foreground="#000000")
self.entryInitialVarDesc.configure(insertbackground="black")
self.entryInitialVarDesc.configure(width=624)
self.buttonInitialVarDesc = Button(self.Tabs_t2)
self.buttonInitialVarDesc.place(relx=0.84, rely=0.25, height=23
, width=146)
self.buttonInitialVarDesc.configure(activebackground="#d9d9d9")
self.buttonInitialVarDesc.configure(activeforeground="#000000")
self.buttonInitialVarDesc.configure(background="#d9d9d9")
self.buttonInitialVarDesc.configure(disabledforeground="#a3a3a3")
self.buttonInitialVarDesc.configure(foreground="#000000")
self.buttonInitialVarDesc.configure(highlightbackground="#d9d9d9")
self.buttonInitialVarDesc.configure(highlightcolor="black")
self.buttonInitialVarDesc.configure(pady="0")
self.buttonInitialVarDesc.configure(text='''Upload''')
self.buttonInitialVarDesc.configure(width=146)
self.labelInitialVarDesc = Label(self.Tabs_t2)
self.labelInitialVarDesc.place(relx=0.01, rely=0.2, height=26, width=250)
self.labelInitialVarDesc.configure(background="#d9d9d9")
self.labelInitialVarDesc.configure(disabledforeground="#a3a3a3")
self.labelInitialVarDesc.configure(foreground="#000000")
self.labelInitialVarDesc.configure(text='''Variable Description:''')
self.labelInitialVarDesc.configure(width=172)
self.entryQueryPopulation = Entry(self.Tabs_t2)
self.entryQueryPopulation.place(relx=0.19, rely=0.35, relheight=0.04
, relwidth=0.64)
self.entryQueryPopulation.configure(background="white")
self.entryQueryPopulation.configure(disabledforeground="#a3a3a3")
self.entryQueryPopulation.configure(font="TkFixedFont")
self.entryQueryPopulation.configure(foreground="#000000")
self.entryQueryPopulation.configure(insertbackground="black")
self.entryQueryPopulation.configure(width=654)
self.buttonQueryPopulation = Button(self.Tabs_t2)
self.buttonQueryPopulation.place(relx=0.84, rely=0.35, height=23
, width=146)
self.buttonQueryPopulation.configure(activebackground="#d9d9d9")
self.buttonQueryPopulation.configure(activeforeground="#000000")
self.buttonQueryPopulation.configure(background="#d9d9d9")
self.buttonQueryPopulation.configure(disabledforeground="#a3a3a3")
self.buttonQueryPopulation.configure(foreground="#000000")
self.buttonQueryPopulation.configure(highlightbackground="#d9d9d9")
self.buttonQueryPopulation.configure(highlightcolor="black")
self.buttonQueryPopulation.configure(pady="0")
self.buttonQueryPopulation.configure(text='''Upload''')
self.buttonQueryPopulation.configure(width=316)
self.labelInitialVarDesc = Label(self.Tabs_t2)
self.labelInitialVarDesc.place(relx=0.01, rely=0.3, height=26, width=250)
self.labelInitialVarDesc.configure(background="#d9d9d9")
self.labelInitialVarDesc.configure(disabledforeground="#a3a3a3")
self.labelInitialVarDesc.configure(foreground="#000000")
self.labelInitialVarDesc.configure(text='''Population Dataset:''')
self.labelInitialVarDesc.configure(width=172)
#--------Database Description File UI--------#
self.entryDatabaseDesc = Entry(self.Tabs_t2)
self.entryDatabaseDesc.place(relx=0.19, rely=0.45, relheight=0.04
, relwidth=0.64)
self.entryDatabaseDesc.configure(background="white")
self.entryDatabaseDesc.configure(disabledforeground="#a3a3a3")
self.entryDatabaseDesc.configure(font="TkFixedFont")
self.entryDatabaseDesc.configure(foreground="#000000")
self.entryDatabaseDesc.configure(insertbackground="black")
self.entryDatabaseDesc.configure(width=654)
self.buttonDatabaseDesc = Button(self.Tabs_t2)
self.buttonDatabaseDesc.place(relx=0.84, rely=0.45, height=23
, width=146)
self.buttonDatabaseDesc.configure(activebackground="#d9d9d9")
self.buttonDatabaseDesc.configure(activeforeground="#000000")
self.buttonDatabaseDesc.configure(background="#d9d9d9")
self.buttonDatabaseDesc.configure(disabledforeground="#a3a3a3")
self.buttonDatabaseDesc.configure(foreground="#000000")
self.buttonDatabaseDesc.configure(highlightbackground="#d9d9d9")
self.buttonDatabaseDesc.configure(highlightcolor="black")
self.buttonDatabaseDesc.configure(pady="0")
self.buttonDatabaseDesc.configure(text='''Submit''')
self.buttonDatabaseDesc.configure(width=316)
self.labelInitialVarDesc = Label(self.Tabs_t2)
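# NOTE: self.labelInitialVarDesc is re-assigned here (it was already created twice above); only this last Label stays reachable through the attribute, though the earlier widgets remain visible.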
self.labelInitialVarDesc.place(relx=0.01, rely=0.4, height=26, width=250)
self.labelInitialVarDesc.configure(background="#d9d9d9")
self.labelInitialVarDesc.configure(disabledforeground="#a3a3a3")
self.labelInitialVarDesc.configure(foreground="#000000")
self.labelInitialVarDesc.configure(text='''Enter unique code:''')
self.labelInitialVarDesc.configure(width=172)
#------------------------------------------#
'''
BINDING PREPROCESSOR ELEMENTS
'''
self.buttonStartVariableDescriptor.bind('<Button-1>', self.makeInitialVarDesc)
self.buttonVariableFile.bind('<Button-1>', self.getVariableFile)
self.buttonValuesFile.bind('<Button-1>', self.getValuesFile)
self.buttonInitialVarDesc.bind('<Button-1>', self.uploadInitVarDesc)
self.buttonDatabaseDesc.bind('<Button-1>', self.uploadDatabaseDescription)
'''
TAB 3 - QUERY
'''
self.labelFrameQueryDataA = LabelFrame(self.Tabs_t3)
self.labelFrameQueryDataA.place(relx=0.01, rely=0.07, relheight=0.7
, relwidth=0.48)
self.labelFrameQueryDataA.configure(relief=GROOVE)
self.labelFrameQueryDataA.configure(foreground="black")
self.labelFrameQueryDataA.configure(text='''Dataset A''')
self.labelFrameQueryDataA.configure(background="#d9d9d9")
self.labelFrameQueryDataA.configure(width=480)
global queryStrFilterB
self.entryQuerySetDataA = Entry(self.labelFrameQueryDataA)
self.entryQuerySetDataA.place(relx=0.02, rely=0.04, relheight=0.05
, relwidth=0.2)
self.entryQuerySetDataA.configure(background="white")
self.entryQuerySetDataA.configure(disabledforeground="#a3a3a3")
self.entryQuerySetDataA.configure(font="TkFixedFont")
self.entryQuerySetDataA.configure(foreground="#000000")
self.entryQuerySetDataA.configure(insertbackground="black")
self.entryQuerySetDataA.configure(width=94)
self.buttonQuerySetDataA = Button(self.labelFrameQueryDataA)
self.buttonQuerySetDataA.place(relx=0.02, rely=0.1, height=23, width=96)
self.buttonQuerySetDataA.configure(activebackground="#d9d9d9")
self.buttonQuerySetDataA.configure(activeforeground="#000000")
self.buttonQuerySetDataA.configure(background="#d9d9d9")
self.buttonQuerySetDataA.configure(disabledforeground="#a3a3a3")
self.buttonQuerySetDataA.configure(foreground="#000000")
self.buttonQuerySetDataA.configure(highlightbackground="#d9d9d9")
self.buttonQuerySetDataA.configure(highlightcolor="black")
self.buttonQuerySetDataA.configure(pady="0")
self.buttonQuerySetDataA.configure(text='''Find Feature''')
self.buttonQuerySetDataA.configure(width=96)
self.listQuerySetDataA = Listbox(self.labelFrameQueryDataA)
self.listQuerySetDataA.place(relx=0.23, rely=0.04, relheight=0.26
, relwidth=0.76)
self.listQuerySetDataA.configure(background="white")
self.listQuerySetDataA.configure(disabledforeground="#a3a3a3")
self.listQuerySetDataA.configure(font="TkFixedFont")
self.listQuerySetDataA.configure(foreground="#000000")
self.listQuerySetDataA.configure(width=364)
self.listQuerySetDataA.configure(selectmode=MULTIPLE)
self.listQuerySetDataA.configure(exportselection="0")
self.listQuerySetDataA.configure(highlightbackground="#d9d9d9")
self.listQuerySetDataA.configure(highlightcolor="black")
self.listQuerySetDataA.configure(selectbackground="#c4c4c4")
self.listQuerySetDataA.configure(selectforeground="black")
self.buttonQueryAddFilterA = Button(self.labelFrameQueryDataA)
self.buttonQueryAddFilterA.place(relx=0.02, rely=0.15, height=23, width=96)
self.buttonQueryAddFilterA.configure(activebackground="#d9d9d9")
self.buttonQueryAddFilterA.configure(activeforeground="#000000")
self.buttonQueryAddFilterA.configure(background="#d9d9d9")
self.buttonQueryAddFilterA.configure(disabledforeground="#a3a3a3")
self.buttonQueryAddFilterA.configure(foreground="#000000")
self.buttonQueryAddFilterA.configure(highlightbackground="#d9d9d9")
self.buttonQueryAddFilterA.configure(highlightcolor="black")
self.buttonQueryAddFilterA.configure(pady="0")
self.buttonQueryAddFilterA.configure(text='''Filter''')
self.buttonQueryAddFilterA.configure(width=96)
self.buttonQueryResetFilterA = Button(self.labelFrameQueryDataA)
self.buttonQueryResetFilterA.place(relx=0.02, rely=0.20, height=23, width=96)
self.buttonQueryResetFilterA.configure(activebackground="#d9d9d9")
self.buttonQueryResetFilterA.configure(activeforeground="#000000")
self.buttonQueryResetFilterA.configure(background="#d9d9d9")
self.buttonQueryResetFilterA.configure(disabledforeground="#a3a3a3")
self.buttonQueryResetFilterA.configure(foreground="#000000")
self.buttonQueryResetFilterA.configure(highlightbackground="#d9d9d9")
self.buttonQueryResetFilterA.configure(highlightcolor="black")
self.buttonQueryResetFilterA.configure(pady="0")
self.buttonQueryResetFilterA.configure(text='''Reset Dataset''')
self.buttonQueryResetFilterA.configure(width=96)
self.labelQueryDataACount = Label(self.labelFrameQueryDataA)
self.labelQueryDataACount.place(relx=0.02, rely=0.25, height=23, width=96)
self.labelQueryDataACount.configure(text='Count: ')
self.entryQueryFeatureA = Entry(self.labelFrameQueryDataA)
self.entryQueryFeatureA.place(relx=0.23, rely=0.32, relheight=0.05
, relwidth=0.76)
self.entryQueryFeatureA.configure(background="white")
self.entryQueryFeatureA.configure(disabledforeground="#a3a3a3")
self.entryQueryFeatureA.configure(font="TkFixedFont")
self.entryQueryFeatureA.configure(foreground="#000000")
self.entryQueryFeatureA.configure(insertbackground="black")
self.entryQueryFeatureA.configure(width=364)
self.buttonQueryFeatureA = Button(self.labelFrameQueryDataA)
self.buttonQueryFeatureA.place(relx=0.02, rely=0.32, height=23, width=96)
self.buttonQueryFeatureA.configure(activebackground="#d9d9d9")
self.buttonQueryFeatureA.configure(activeforeground="#000000")
self.buttonQueryFeatureA.configure(background="#d9d9d9")
self.buttonQueryFeatureA.configure(disabledforeground="#a3a3a3")
self.buttonQueryFeatureA.configure(foreground="#000000")
self.buttonQueryFeatureA.configure(highlightbackground="#d9d9d9")
self.buttonQueryFeatureA.configure(highlightcolor="black")
self.buttonQueryFeatureA.configure(pady="0")
self.buttonQueryFeatureA.configure(text='''Enter Code''')
self.buttonQueryFeatureA.configure(width=96)
self.listQueryDataA = Listbox(self.labelFrameQueryDataA)
self.listQueryDataA.place(relx=0.02, rely=0.43, relheight=0.48
, relwidth=0.97)
self.listQueryDataA.configure(background="white")
self.listQueryDataA.configure(disabledforeground="#a3a3a3")
self.listQueryDataA.configure(font="TkFixedFont")
self.listQueryDataA.configure(foreground="#000000")
self.listQueryDataA.configure(width=364)
self.listQueryDataA.configure(selectmode=MULTIPLE)
self.listQueryDataA.configure(exportselection="0")
self.listQueryDataA.configure(highlightbackground="#d9d9d9")
self.listQueryDataA.configure(highlightcolor="black")
self.listQueryDataA.configure(selectbackground="#c4c4c4")
self.listQueryDataA.configure(selectforeground="black")
self.labelQueryDataAFeature = Label(self.labelFrameQueryDataA)
self.labelQueryDataAFeature.place(relx=0.02, rely=0.38, relheight=0.05, relwidth=0.97)
self.labelQueryDataAFeature.configure(text='''''')
self.labelQueryDataA = Label(self.labelFrameQueryDataA)
self.labelQueryDataA.place(relx=0.02, rely=0.91, height=26, width=462)
self.labelQueryDataA.configure(background="#d9d9d9")
self.labelQueryDataA.configure(disabledforeground="#a3a3a3")
self.labelQueryDataA.configure(foreground="#000000")
self.labelQueryDataA.configure(text='''NO DATA SELECTED''')
self.labelQueryDataA.configure(width=462)
self.labelFrameQueryDataB = LabelFrame(self.Tabs_t3)
self.labelFrameQueryDataB.place(relx=0.5, rely=0.07, relheight=0.7
, relwidth=0.48)
self.labelFrameQueryDataB.configure(relief=GROOVE)
self.labelFrameQueryDataB.configure(foreground="black")
self.labelFrameQueryDataB.configure(text='''Dataset B''')
self.labelFrameQueryDataB.configure(background="#d9d9d9")
self.labelFrameQueryDataB.configure(width=480)
global queryStrFilterA
self.entryQuerySetDataB = Entry(self.labelFrameQueryDataB)
self.entryQuerySetDataB.place(relx=0.02, rely=0.04, relheight=0.05
, relwidth=0.2)
self.entryQuerySetDataB.configure(background="white")
self.entryQuerySetDataB.configure(disabledforeground="#a3a3a3")
self.entryQuerySetDataB.configure(font="TkFixedFont")
self.entryQuerySetDataB.configure(foreground="#000000")
self.entryQuerySetDataB.configure(insertbackground="black")
self.entryQuerySetDataB.configure(width=94)
self.buttonQuerySetDataB = Button(self.labelFrameQueryDataB)
self.buttonQuerySetDataB.place(relx=0.02, rely=0.1, height=23, width=96)
self.buttonQuerySetDataB.configure(activebackground="#d9d9d9")
self.buttonQuerySetDataB.configure(activeforeground="#000000")
self.buttonQuerySetDataB.configure(background="#d9d9d9")
self.buttonQuerySetDataB.configure(disabledforeground="#a3a3a3")
self.buttonQuerySetDataB.configure(foreground="#000000")
self.buttonQuerySetDataB.configure(highlightbackground="#d9d9d9")
self.buttonQuerySetDataB.configure(highlightcolor="black")
self.buttonQuerySetDataB.configure(pady="0")
self.buttonQuerySetDataB.configure(text='''Find Feature''')
self.buttonQuerySetDataB.configure(width=96)
self.listQuerySetDataB = Listbox(self.labelFrameQueryDataB)
self.listQuerySetDataB.place(relx=0.23, rely=0.04, relheight=0.26
, relwidth=0.76)
self.listQuerySetDataB.configure(background="white")
self.listQuerySetDataB.configure(disabledforeground="#a3a3a3")
self.listQuerySetDataB.configure(font="TkFixedFont")
self.listQuerySetDataB.configure(foreground="#000000")
self.listQuerySetDataB.configure(width=364)
self.listQuerySetDataB.configure(selectmode=MULTIPLE)
self.listQuerySetDataB.configure(exportselection="0")
self.listQuerySetDataB.configure(highlightbackground="#d9d9d9")
self.listQuerySetDataB.configure(highlightcolor="black")
self.listQuerySetDataB.configure(selectbackground="#c4c4c4")
self.listQuerySetDataB.configure(selectforeground="black")
self.buttonQueryAddFilterB = Button(self.labelFrameQueryDataB)
self.buttonQueryAddFilterB.place(relx=0.02, rely=0.15, height=23, width=96)
self.buttonQueryAddFilterB.configure(activebackground="#d9d9d9")
self.buttonQueryAddFilterB.configure(activeforeground="#000000")
self.buttonQueryAddFilterB.configure(background="#d9d9d9")
self.buttonQueryAddFilterB.configure(disabledforeground="#a3a3a3")
self.buttonQueryAddFilterB.configure(foreground="#000000")
self.buttonQueryAddFilterB.configure(highlightbackground="#d9d9d9")
self.buttonQueryAddFilterB.configure(highlightcolor="black")
self.buttonQueryAddFilterB.configure(pady="0")
self.buttonQueryAddFilterB.configure(text='''Filter''')
self.buttonQueryAddFilterB.configure(width=96)
self.buttonQueryResetFilterB = Button(self.labelFrameQueryDataB)
self.buttonQueryResetFilterB.place(relx=0.02, rely=0.20, height=23, width=96)
self.buttonQueryResetFilterB.configure(activebackground="#d9d9d9")
self.buttonQueryResetFilterB.configure(activeforeground="#000000")
self.buttonQueryResetFilterB.configure(background="#d9d9d9")
self.buttonQueryResetFilterB.configure(disabledforeground="#a3a3a3")
self.buttonQueryResetFilterB.configure(foreground="#000000")
self.buttonQueryResetFilterB.configure(highlightbackground="#d9d9d9")
self.buttonQueryResetFilterB.configure(highlightcolor="black")
self.buttonQueryResetFilterB.configure(pady="0")
self.buttonQueryResetFilterB.configure(text='''Reset Dataset''')
self.labelQueryDataBCount = Label(self.labelFrameQueryDataB)
self.labelQueryDataBCount.place(relx=0.02, rely=0.25, height=23, width=96)
self.labelQueryDataBCount.configure(text='Count: ')
self.entryQueryFeatureB = Entry(self.labelFrameQueryDataB)
self.entryQueryFeatureB.place(relx=0.23, rely=0.32, relheight=0.05
, relwidth=0.76)
self.entryQueryFeatureB.configure(background="white")
self.entryQueryFeatureB.configure(disabledforeground="#a3a3a3")
self.entryQueryFeatureB.configure(font="TkFixedFont")
self.entryQueryFeatureB.configure(foreground="#000000")
self.entryQueryFeatureB.configure(insertbackground="black")
self.entryQueryFeatureB.configure(width=364)
self.buttonQueryFeatureB = Button(self.labelFrameQueryDataB)
self.buttonQueryFeatureB.place(relx=0.02, rely=0.32, height=23, width=96)
self.buttonQueryFeatureB.configure(activebackground="#d9d9d9")
self.buttonQueryFeatureB.configure(activeforeground="#000000")
self.buttonQueryFeatureB.configure(background="#d9d9d9")
self.buttonQueryFeatureB.configure(disabledforeground="#a3a3a3")
self.buttonQueryFeatureB.configure(foreground="#000000")
self.buttonQueryFeatureB.configure(highlightbackground="#d9d9d9")
self.buttonQueryFeatureB.configure(highlightcolor="black")
self.buttonQueryFeatureB.configure(pady="0")
self.buttonQueryFeatureB.configure(text='''Enter Code''')
self.buttonQueryFeatureB.configure(width=96)
self.listQueryDataB = Listbox(self.labelFrameQueryDataB)
self.listQueryDataB.place(relx=0.02, rely=0.43, relheight=0.48
, relwidth=0.97)
self.listQueryDataB.configure(background="white")
self.listQueryDataB.configure(disabledforeground="#a3a3a3")
self.listQueryDataB.configure(font="TkFixedFont")
self.listQueryDataB.configure(foreground="#000000")
self.listQueryDataB.configure(width=364)
self.listQueryDataB.configure(selectmode=MULTIPLE)
self.listQueryDataB.configure(exportselection="0")
self.listQueryDataB.configure(highlightbackground="#d9d9d9")
self.listQueryDataB.configure(highlightcolor="black")
self.listQueryDataB.configure(selectbackground="#c4c4c4")
self.listQueryDataB.configure(selectforeground="black")
self.labelQueryDataBFeature = Label(self.labelFrameQueryDataB)
self.labelQueryDataBFeature.place(relx=0.02, rely=0.38, relheight=0.05, relwidth=0.97)
self.labelQueryDataBFeature.configure(text='''''')
self.labelQueryDataB = Label(self.labelFrameQueryDataB)
self.labelQueryDataB.place(relx=0.02, rely=0.91, height=26, width=462)
self.labelQueryDataB.configure(background="#d9d9d9")
self.labelQueryDataB.configure(disabledforeground="#a3a3a3")
self.labelQueryDataB.configure(foreground="#000000")
self.labelQueryDataB.configure(text='''NO DATA SELECTED''')
self.labelQueryDataB.configure(width=462)
global testTypes
testTypes = ["Sample vs Sample","Sample vs Population"]
self.comboQueryTest = ttk.Combobox(self.Tabs_t3)
self.comboQueryTest.place(relx=0.01, rely=0.02, height=23, width=316)
self.comboQueryTest.configure(exportselection="0")
self.comboQueryTest.configure(takefocus="")
self.comboQueryTest.configure(values=testTypes)
self.comboQueryTest.current(0)
self.comboQueryTest.configure(state="readonly")
self.labelFrameQueryZ = LabelFrame(self.Tabs_t3)
self.labelFrameQueryZ.place(relx=0.01, rely=0.78, relheight=0.1, relwidth=0.48)
self.labelFrameQueryZ.configure(relief=GROOVE)
self.labelFrameQueryZ.configure(foreground="black")
self.labelFrameQueryZ.configure(text='''Z-Test''')
self.labelFrameQueryZ.configure(background="#d9d9d9")
self.labelFrameQueryZ.configure(width=480)
self.labelQueryZTest = Label(self.labelFrameQueryZ)
self.labelQueryZTest.place(relx=0.47, rely=0.01, height=26, width=240)
# self.labelQueryZTest.configure(background="#d9d9d9")
self.labelQueryZTest.configure(disabledforeground="#a3a3a3")
self.labelQueryZTest.configure(foreground="#000000")
self.labelQueryZTest.configure(text='''NO DATA''')
self.labelQueryZTest.configure(width=862)
self.buttonQueryZTest = Button(self.labelFrameQueryZ)
self.buttonQueryZTest.place(relx=0.01, rely=0.01, height=23, width=106)
self.buttonQueryZTest.configure(activebackground="#d9d9d9")
self.buttonQueryZTest.configure(activeforeground="#000000")
self.buttonQueryZTest.configure(background="#d9d9d9")
self.buttonQueryZTest.configure(disabledforeground="#a3a3a3")
self.buttonQueryZTest.configure(foreground="#000000")
self.buttonQueryZTest.configure(highlightbackground="#d9d9d9")
self.buttonQueryZTest.configure(highlightcolor="black")
self.buttonQueryZTest.configure(pady="0")
self.buttonQueryZTest.configure(text='''Test''')
self.buttonQueryZTest.configure(width=106)
self.labelFrameQueryChi = LabelFrame(self.Tabs_t3)
self.labelFrameQueryChi.place(relx=0.5, rely=0.78, relheight=0.1
, relwidth=0.48)
self.labelFrameQueryChi.configure(relief=GROOVE)
self.labelFrameQueryChi.configure(foreground="black")
self.labelFrameQueryChi.configure(text='''Chi Test''')
self.labelFrameQueryChi.configure(background="#d9d9d9")
self.labelFrameQueryChi.configure(width=480)
global arrQueryCriticalValue
arrQueryCriticalValue = ["0.80", "0.90", "0.95", "0.98", "0.99"]
global arrQueryCriticalValueMapping
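# Two-tailed z critical values keyed by confidence level (e.g. 0.95 -> 1.96).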
arrQueryCriticalValueMapping = {"0.80":1.28, "0.90":1.645, "0.95":1.96, "0.98":2.33, "0.99":2.58}
self.comboQueryCriticalValue = ttk.Combobox(self.labelFrameQueryZ)
self.comboQueryCriticalValue.place(relx=0.24, rely=0.01, height=23, width=106)
self.comboQueryCriticalValue.configure(exportselection="0")
self.comboQueryCriticalValue.configure(takefocus="")
self.comboQueryCriticalValue.configure(values=arrQueryCriticalValue)
self.comboQueryCriticalValue.set(arrQueryCriticalValue[0])
self.labelQueueCount = Label(self.Tabs_t3)
self.labelQueueCount.place(relx=0.87, rely=0.01, height=23, width=106)
self.labelQueueCount.configure(text='''Queue Count: 0''')
'''
self.buttonTest = Button(self.labelFrameQueryChi)
self.buttonTest.place(relx=0.01, rely=0.01, height=23, width=106)
self.buttonTest.configure(activebackground="#d9d9d9")
self.buttonTest.configure(activeforeground="#000000")
self.buttonTest.configure(background="#d9d9d9")
self.buttonTest.configure(disabledforeground="#a3a3a3")
self.buttonTest.configure(foreground="#000000")
self.buttonTest.configure(highlightbackground="#d9d9d9")
self.buttonTest.configure(highlightcolor="black")
self.buttonTest.configure(pady="0")
self.buttonTest.configure(text=''''Test'''')
'''
# self.buttonTest.configure(state='disabled')
self.buttonTestQueue = Button(self.labelFrameQueryChi)
self.buttonTestQueue.place(relx=0.7, rely=0.01, height=23, width=106)
self.buttonTestQueue.configure(activebackground="#d9d9d9")
self.buttonTestQueue.configure(activeforeground="#000000")
self.buttonTestQueue.configure(background="#d9d9d9")
self.buttonTestQueue.configure(disabledforeground="#a3a3a3")
self.buttonTestQueue.configure(foreground="#000000")
self.buttonTestQueue.configure(highlightbackground="#d9d9d9")
self.buttonTestQueue.configure(highlightcolor="black")
self.buttonTestQueue.configure(pady="0")
self.buttonTestQueue.configure(text='''Run Miner''')
# self.buttonTestQueue.configure(state='disabled')
self.buttonClearQueue = Button(self.labelFrameQueryChi)
self.buttonClearQueue.place(relx=0.47, rely=0.01, height=23, width=106)
self.buttonClearQueue.configure(activebackground="#d9d9d9")
self.buttonClearQueue.configure(activeforeground="#000000")
self.buttonClearQueue.configure(background="#d9d9d9")
self.buttonClearQueue.configure(disabledforeground="#a3a3a3")
self.buttonClearQueue.configure(foreground="#000000")
self.buttonClearQueue.configure(highlightbackground="#d9d9d9")
self.buttonClearQueue.configure(highlightcolor="black")
self.buttonClearQueue.configure(pady="0")
self.buttonClearQueue.configure(text='''Clear All''')
# self.buttonClearQueue.configure(state='disabled')
self.buttonQueue = Button(self.labelFrameQueryChi)
self.buttonQueue.place(relx=0.01, rely=0.01, height=23, width=106)
self.buttonQueue.configure(activebackground="#d9d9d9")
self.buttonQueue.configure(activeforeground="#000000")
self.buttonQueue.configure(background="#d9d9d9")
self.buttonQueue.configure(disabledforeground="#a3a3a3")
self.buttonQueue.configure(foreground="#000000")
self.buttonQueue.configure(highlightbackground="#d9d9d9")
self.buttonQueue.configure(highlightcolor="black")
self.buttonQueue.configure(pady="0")
self.buttonQueue.configure(text='''Enqueue''')
# self.buttonQueue.configure(state='disabled')
self.labelFrameQuerySvP = LabelFrame(self.Tabs_t3)
self.labelFrameQuerySvP.place(relx=0.01, rely=0.88, relheight=0.1
, relwidth=0.48)
self.labelFrameQuerySvP.configure(relief=GROOVE)
self.labelFrameQuerySvP.configure(foreground="black")
self.labelFrameQuerySvP.configure(text='''Z-Test Sample Vs Population''')
self.labelFrameQuerySvP.configure(background="#d9d9d9")
self.labelFrameQuerySvP.configure(width=480)
self.comboQueryCriticalValueSvP = ttk.Combobox(self.labelFrameQuerySvP)
self.comboQueryCriticalValueSvP.place(relx=0.24, rely=0.01, height=23, width=106)
self.comboQueryCriticalValueSvP.configure(exportselection="0")
self.comboQueryCriticalValueSvP.configure(takefocus="")
self.comboQueryCriticalValueSvP.configure(values=arrQueryCriticalValue)
self.comboQueryCriticalValueSvP.set(arrQueryCriticalValue[0])
self.comboQueryCriticalValueSvP.configure(state="disabled")
self.labelQueryZTestSvP = Label(self.labelFrameQuerySvP)
self.labelQueryZTestSvP.place(relx=0.47, rely=0.01, height=26, width=240)
# self.labelQueryZTest.configure(background="#d9d9d9")
self.labelQueryZTestSvP.configure(disabledforeground="#a3a3a3")
self.labelQueryZTestSvP.configure(foreground="#000000")
self.labelQueryZTestSvP.configure(text='''NO DATA''')
self.labelQueryZTestSvP.configure(width=862)
self.labelQueryZTestSvP.configure(state="disabled")
self.buttonQueryZTestSvP = Button(self.labelFrameQuerySvP)
self.buttonQueryZTestSvP.place(relx=0.01, rely=0.01, height=23, width=106)
self.buttonQueryZTestSvP.configure(activebackground="#d9d9d9")
self.buttonQueryZTestSvP.configure(activeforeground="#000000")
self.buttonQueryZTestSvP.configure(background="#d9d9d9")
self.buttonQueryZTestSvP.configure(disabledforeground="#a3a3a3")
self.buttonQueryZTestSvP.configure(foreground="#000000")
self.buttonQueryZTestSvP.configure(highlightbackground="#d9d9d9")
self.buttonQueryZTestSvP.configure(highlightcolor="black")
self.buttonQueryZTestSvP.configure(pady="0")
self.buttonQueryZTestSvP.configure(text='''Test''')
self.buttonQueryZTestSvP.configure(width=106)
self.buttonQueryZTestSvP.configure(state="disabled")
'''
BINDING FOR QUERY TAB
'''
self.buttonQueryPopulation.bind('<Button-1>', self.querySetPopulation)
self.buttonQuerySetDataA.bind('<Button-1>', self.querySetDataA)
self.buttonQuerySetDataB.bind('<Button-1>', self.querySetDataB)
self.buttonQueryAddFilterA.bind('<Button-1>', self.queryAddFilterA)
self.buttonQueryAddFilterB.bind('<Button-1>', self.queryAddFilterB)
self.buttonQueryFeatureA.bind('<Button-1>', self.querySetFeatureA)
self.buttonQueryFeatureB.bind('<Button-1>', self.querySetFeatureB)
self.buttonQueryZTest.bind('<Button-1>', self.queryZTest)
self.buttonQueryZTestSvP.bind('<Button-1>', self.querySVP)
self.buttonQueue.bind('<Button-1>', self.queue)
self.buttonClearQueue.bind('<Button-1>', self.clearQueue)
self.buttonTestQueue.bind('<Button-1>', self.testQueue)
self.buttonQueryResetFilterA.bind('<Button-1>', self.queryResetDatasetA)
self.buttonQueryResetFilterB.bind('<Button-1>', self.queryResetDatasetB)
self.listQuerySetDataA.bind('<<ListboxSelect>>', self.querySelectDataValuesA)
self.listQuerySetDataB.bind('<<ListboxSelect>>', self.querySelectDataValuesB)
self.listQueryDataA.bind('<<ListboxSelect>>', self.setFocusFeatureValuesA)
self.listQueryDataB.bind('<<ListboxSelect>>', self.setFocusFeatureValuesB)
self.comboQueryTest.bind('<<ComboboxSelected>>', self.querySetType)
#######################################
global queryType
queryType = self.comboQueryTest.get()
global populationDir
populationDir = ""
self.populationDataset = []
self.datasetA = {'Data':[], 'Filter Features':[]}
self.datasetB = {'Data':[], 'Filter Features':[]}
global tests
tests = []
self.labelQueryDataACount.configure(text="n: " + str(len(self.datasetA['Data'])))
self.labelQueryDataBCount.configure(text="n: " + str(len(self.datasetB['Data'])))
'''
Functions to be called by the bound commands
'''
#Adds test to the queue
def addToQueue(self, testType, **params):
global tests
test = {'Type':testType}
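# Copy (or deep-copy) each parameter so later changes in the UI do not mutate tests already sitting in the queue.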
for key in params:
if(key == 'popDirArg'):
test['Population Path'] = copy.copy(params[key])
elif(key == 'sampleFeatArg'):
test['Sample Feature'] = copy.copy(params[key])
elif(key == 'selectedFeatArg'):
test['Selected Feature'] = copy.copy(params[key])
elif(key == 'allValArg'):
test['SF All Values'] = copy.copy(params[key])
elif(key == 'selValArg'):
test['SF Selected Values'] = copy.copy(params[key])
elif(key == 'datasetArgs'):
test['Datasets'] = copy.deepcopy(params[key])
elif(key == 'zArg'):
test['Z Critical Value'] = copy.copy(params[key])
tests.append(test)
self.labelQueueCount.configure(text='Queue Count: ' + str(len(tests)))
messagebox.showinfo("Test queued", test['Type'] + " has been queued.")
'''
DEFINING BOUND COMMANDS
'''
#Create the Initial Variable Descriptor
def makeInitialVarDesc(self, evt):
varFileDir = self.entryVariableFile.get()
valFileDir = self.entryValuesFile.get()
messagebox.showinfo("Work in progress",'Make the Initial Variable Descriptor! (WIP)')
def getVariableFile(self, evt=None):
varFileDir = askopenfilename(title = "Select variable file",filetypes = (("txt files","*.txt"),("all files","*.*")))
self.entryVariableFile.delete(0, END)
self.entryVariableFile.insert(0, varFileDir)
def getValuesFile(self, evt=None):
valFileDir = askopenfilename(title = "Select values file",filetypes = (("txt files","*.txt"),("all files","*.*")))
self.entryValuesFile.delete(0,END)
self.entryValuesFile.insert(0, valFileDir)
# ABOUT US
def showAbout(self):
strAbout = "OTOO Miner v4.0\n" \
"by TE3D House\n" \
"De La Salle University - Laguna"
messagebox.showinfo("About", strAbout)
# UPLOAD MODULE
def setPopulation(self, evt):
global populationDir
populationDir = filedialog.askopenfilename(title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
self.entryQueryPopulation.delete(0,END)
self.entryQueryPopulation.insert(0,populationDir)
self.populationDataset = readCSVDict(populationDir)
self.datasetA['Data']=[]
self.datasetB['Data']=[]
if(len(list(self.populationDataset)) > 0):
messagebox.showinfo("Population set", "Population dataset uploaded")
self.populationDataset = readCSVDict(populationDir)
for record in self.populationDataset:
self.datasetA['Data'].append(record)
self.datasetB['Data'].append(record)
self.labelQueryDataACount.configure(text="n: " + str(len(self.datasetA['Data'])) )
self.labelQueryDataBCount.configure(text="n: " + str(len(self.datasetB['Data'])) )
else:
messagebox.showerror("Error: Upload error", "Error uploading population dataset. Please try again.")
def setPopulationFromDatabase(self):
self.populationDataset = readCSVDict(filenameFromDB + " results.csv")
self.datasetA['Data'] = []
self.datasetB['Data'] = []
# main.py
import scipy.io as sio
import numpy as np
from scipy.sparse import csc_matrix
import scipy.sparse as sp
from sklearn.preprocessing import normalize
import scipy.sparse.linalg as LA
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import argparse
import sklearn.metrics as metrics
from model import GCN, GFNN, MLP, pro_lstm_featwalk, pro_lstm_featwalk_full
from random_walk import walk_dic_featwalk
from utils import output_csv, subsample
import time
import sys, os
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='ATTR_RW')
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--dataset', type=str, default='BlogCatalog')
parser.add_argument('--proportion', type=float, default=0.25)
parser.add_argument('--saved', action='store_true', default=False)
parser.add_argument('--output_file', type=str, default='toy')
parser.add_argument('--times_features', action='store_true', default=False)
parser.add_argument('--subsample', action='store_true', default=False)
parser.add_argument('--data_path', type=str, default='.')
args = parser.parse_args()
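# Wrap stdout so every write is flushed immediately; keeps progress prints visible when output is redirected to a log file.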
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def writelines(self, datas):
self.stream.writelines(datas)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
print(args)
output_file = 'results/' + args.output_file + '_' + str(args.seed) + '.csv'
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
device = 'cuda:' + str(args.gpu)
torch.cuda.manual_seed(args.seed)
else:
device = 'cpu'
if args.dataset.lower() == 'blogcatalog':
mat_name = args.data_path + '/data/BlogCatalog.mat'
nb_classes = 6
elif args.dataset.lower() == 'flickr':
mat_name = args.data_path + '/data/Flickr.mat' #'data/Flickr_SDM.mat'
nb_classes = 9
mat_contents = sio.loadmat(mat_name)
adj = mat_contents['Network']
features = mat_contents['Attributes']
def concatenate_csc_matrices_by_columns(matrix1, matrix2):
assert matrix1.shape[0] == matrix2.shape[0]
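# CSC keeps one indptr entry per column, so appending matrix2's columns only requires shifting its indptr by matrix1's nnz and concatenating data/indices.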
new_data = np.concatenate((matrix1.data, matrix2.data))
new_indices = np.concatenate((matrix1.indices, matrix2.indices))
new_ind_ptr = matrix2.indptr + len(matrix1.data)
new_ind_ptr = new_ind_ptr[1:]
new_ind_ptr = np.concatenate((matrix1.indptr, new_ind_ptr))
return csc_matrix((new_data, new_indices, new_ind_ptr))
def concatenate_sparse_matrices_by_rows(matrix1, matrix2):
assert matrix1.shape[1] == matrix2.shape[1]
return concatenate_csc_matrices_by_columns(matrix1.transpose().tocsc(), matrix2.transpose().tocsc()).transpose().tocsc()
def concat_attr(adj, features, alpha=1.0):
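# Build the attribute-augmented graph: original nodes plus one node per feature, with node-feature edges given by degree-scaled, row-normalized attributes (alpha balances them against structural edges) and no feature-feature edges.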
adj_sum = sp.csc_matrix(np.diag(np.array(adj.sum(axis=1)).squeeze()))
features = normalize(features, norm='l1', axis=1)
features = adj_sum @ features * alpha
zeros = sp.csc_matrix(np.zeros(shape=(features.shape[1], features.shape[1])))
return concatenate_sparse_matrices_by_rows(concatenate_csc_matrices_by_columns(adj, features), concatenate_csc_matrices_by_columns(features.transpose().tocsc(), zeros))
def normalize_trans(adj):
# symmetric adj
# output: sym-normed_adj, deg ** (-0.5)
vol = adj.sum()
adj_deg = np.array(adj.sum(axis=0)).squeeze()
adj_deg_inv = sp.csc_matrix(np.diag(np.where(adj_deg>0, adj_deg**(-0.5), 0)))
adj_normed = adj_deg_inv @ adj @ adj_deg_inv
return adj_normed, adj_deg_inv.todense(), vol
# dataset split
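# The last 20% of the permutation is the test set; of the first 80%, only args.proportion is kept, and that kept part is divided 90/10 into train/validation.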
k = args.proportion # fraction of the 80% training split actually kept for training/validation
nb_nodes = adj.shape[0]
nb_train = int(nb_nodes * 0.8)
nb_train_real = int(nb_train * k)
nb_val_start = int(nb_train_real * 0.9)
random_perm = np.random.permutation(nb_nodes)
train_mask_real = random_perm[:nb_val_start]
val_mask = random_perm[nb_val_start:nb_train_real]
test_mask = random_perm[nb_train:]
def micro_f1(logits, labels):
# Compute predictions
preds = torch.argmax(logits, dim=1)
return metrics.f1_score(labels, preds, average='micro')
def macro_f1(logits, labels):
# Compute predictions
preds = torch.argmax(logits, dim=1)
return metrics.f1_score(labels, preds, average='macro')
# train
def train(model, optimizer, loss_func, adj, x, labels, train_mask, val_mask, test_mask, batch_size, epochs, iters_per_epoch, patience):
save_name = 'save_model/' + str(int(time.time())) + str(args.proportion) + '_' + str(args.seed) + '.pkl'
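# Each iteration runs a full-graph forward pass but computes the loss on a rotating mini-batch of training nodes; early stopping keeps the checkpoint with the lowest validation loss.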
best_val_loss = 100 # a large loss
p = 0
for i in range(epochs):
train_mask = train_mask[np.random.permutation(train_mask.shape[0])]
train_start = 0
acum_loss = 0
acum_acc = 0
for j in range(iters_per_epoch):
model.train()
if train_start + batch_size < train_mask.shape[0]:
train_nodes = train_mask[train_start:train_start+batch_size]
train_start += batch_size
else:
train_nodes = list(train_mask[train_start:]) + list(train_mask[:train_start+batch_size-train_mask.shape[0]])
train_start += batch_size-train_mask.shape[0]
output = model(adj, x)
loss = loss_func(output[train_nodes], labels[train_nodes])
optimizer.zero_grad()
loss.backward()
optimizer.step()
acum_loss += loss.detach().cpu()
acum_acc += sum(torch.argmax(output[train_nodes], dim=1) == labels[train_nodes]).float()/batch_size
with torch.no_grad():
model.eval()
output = model(adj, x)
val_loss = loss_func(output[val_mask], labels[val_mask])
val_acc = torch.sum(torch.argmax(output[val_mask], dim=1) == labels[val_mask]).float()/val_mask.shape[0]
print('epoch: {}, training loss: {}, training acc: {}, val loss: {}, val acc: {}'.format(i, acum_loss/iters_per_epoch, acum_acc/iters_per_epoch, \
val_loss, val_acc))
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), save_name)
p = 0
else:
if p >= patience:
break
p += 1
with torch.no_grad():
model.load_state_dict(torch.load(save_name))
model.eval()
output = model(adj, x)
#test_acc = torch.sum(torch.argmax(output[test_mask], dim=1) == labels[test_mask]).float()/test_mask.shape[0]
test_results = [micro_f1(output[test_mask].cpu(), labels[test_mask].cpu()), macro_f1(output[test_mask].cpu(), labels[test_mask].cpu())]
print(test_results)
output_csv(output_file, test_results)
os.remove(save_name)
#train mlp
def train_mlp(model, optimizer, loss_func, x, labels, train_mask, val_mask, test_mask, batch_size, epochs, iters_per_epoch, patience):
save_name = 'save_model/' + str(int(time.time())) + str(args.proportion) + '_' + str(args.seed) + '.pkl'
best_val_loss = 100 # a large loss
p = 0  # patience counter (mirrors train()); avoids a NameError in the patience check if the first epoch does not improve
for i in range(epochs):
train_mask = train_mask[np.random.permutation(train_mask.shape[0])]
train_start = 0
acum_loss = 0
acum_acc = 0
for j in range(iters_per_epoch):
model.train()
if train_start + batch_size < train_mask.shape[0]:
train_nodes = train_mask[train_start:train_start+batch_size]
train_start += batch_size
else:
train_nodes = list(train_mask[train_start:]) + list(train_mask[:train_start+batch_size-train_mask.shape[0]])
train_start += batch_size-train_mask.shape[0]
output = model(x)
loss = loss_func(output[train_nodes], labels[train_nodes])
optimizer.zero_grad()
loss.backward()
optimizer.step()
acum_loss += loss.detach().cpu()
acum_acc += sum(torch.argmax(output[train_nodes], dim=1) == labels[train_nodes]).float()/batch_size
with torch.no_grad():
model.eval()
output = model(x)
val_loss = loss_func(output[val_mask], labels[val_mask])
val_acc = torch.sum(torch.argmax(output[val_mask], dim=1) == labels[val_mask]).float()/val_mask.shape[0]
print('epoch: {}, training loss: {}, training acc: {}, val loss: {}, val acc: {}'.format(i, acum_loss/iters_per_epoch, acum_acc/iters_per_epoch, \
val_loss, val_acc))
if val_loss < best_val_loss:
best_val_loss = val_loss
torch.save(model.state_dict(), save_name)
p = 0
else:
if p >= patience:
break
p += 1
with torch.no_grad():
model.load_state_dict(torch.load(save_name))
model.eval()
output = model(x)
#test_acc = torch.sum(torch.argmax(output[test_mask], dim=1) == labels[test_mask]).float()/test_mask.shape[0]
test_results = [micro_f1(output[test_mask].cpu(), labels[test_mask].cpu()), macro_f1(output[test_mask].cpu(), labels[test_mask].cpu())]
print(test_results)
output_csv(output_file, test_results)
os.remove(save_name)
def train_rna_rw(model, optimizer, loss_func, sentencedic, features, labels, train_mask, val_mask, test_mask, batch_size, epochs, iters_per_epoch, patience):
save_name = 'save_model/' + str(int(time.time())) + str(args.proportion) + '_' + str(args.seed) + '.pkl'
best_val_loss = 1e6 # a large loss
sentences_val = [[]]
val_mask_batch = [[]]
batch_start = 0
p=0
for i in val_mask:
if batch_start >= batch_size:
sentences_val.append([])
val_mask_batch.append([])
batch_start = 0
sentences_val[-1].extend(sentencedic[i])
val_mask_batch[-1].append(i)
batch_start += 1
sentences_test = [[]]
test_mask_batch = [[]]
batch_start = 0
for i in test_mask:
if batch_start >= batch_size:
sentences_test.append([])
test_mask_batch.append([])
batch_start = 0
sentences_test[-1].extend(sentencedic[i])
test_mask_batch[-1].append(i)
batch_start += 1
for i in range(epochs):
train_mask = train_mask[np.random.permutation(train_mask.shape[0])]
train_start = 0
acum_loss = 0
acum_acc = 0
for j in range(iters_per_epoch):
model.train()
if train_start + batch_size < train_mask.shape[0]:
train_nodes = train_mask[train_start:train_start+batch_size]
train_start += batch_size
else:
train_nodes = list(train_mask[train_start:]) + list(train_mask[:train_start+batch_size-train_mask.shape[0]])
train_start += batch_size-train_mask.shape[0]
sentences = []
for k in train_nodes:
sentences.extend(sentencedic[k])
output = model(features[sentences])
loss = loss_func(output, labels[train_nodes])
optimizer.zero_grad()
loss.backward()
optimizer.step()
acum_loss += loss.detach().cpu()
acum_acc += sum(torch.argmax(output, dim=1) == labels[train_nodes]).float()/batch_size
with torch.no_grad():
acum_val_acc = 0
acum_val_loss = 0
model.eval()
for mask, paths in zip(val_mask_batch, sentences_val):
output = model(features[paths])
acum_val_loss += loss_func(output, labels[mask]).detach().cpu() * len(mask)
acum_val_acc += torch.sum(torch.argmax(output, dim=1) == labels[mask]).float()
print('epoch: {}, training loss: {}, training acc: {}, val loss: {}, val acc: {}'.format(i, acum_loss/iters_per_epoch, acum_acc/iters_per_epoch, \
acum_val_loss/len(val_mask), acum_val_acc/len(val_mask)))
if acum_val_loss < best_val_loss:
best_val_loss = acum_val_loss
torch.save(model.state_dict(), save_name)
p = 0
else:
if p >= patience:
break
p += 1
with torch.no_grad():
model.load_state_dict(torch.load(save_name))
model.eval()
outputs = []
for mask, paths in zip(test_mask_batch, sentences_test):
outputs.append(model(features[paths]))
#test_acc = torch.sum(torch.argmax(output[test_mask], dim=1) == labels[test_mask]).float()/test_mask.shape[0]
outputs = torch.cat(outputs).cpu()
test_results = [micro_f1(outputs.cpu(), labels[test_mask].cpu()), macro_f1(outputs.cpu(), labels[test_mask].cpu())]
print(test_results)
output_csv(output_file, test_results)
os.remove(save_name)
# preprocess features
def preprocess_features(features):
"""Row-normalize feature matrix"""
features = normalize(features, norm='l1', axis=1)
return features.todense()
# numpy sparse to pytorch sparse
def np_sparse_to_pt_sparse(matrix):
coo = matrix.tocoo()
values = coo.data
indices = np.vstack((coo.row, coo.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = coo.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape))
if args.model == 'GCN':
#x = preprocess_features(features)
x = features.todense()
x = torch.FloatTensor(x).to(device)
labels = mat_contents['Label']
labels = torch.LongTensor(labels-1).squeeze().to(device)
#adj_pt, _, _ = normalize_trans(adj+sp.eye(adj.shape[0]))
adj_pt = normalize_trans(adj)[0] + sp.eye(adj.shape[0])
adj_pt = np_sparse_to_pt_sparse(adj_pt).to(device)
model = GCN(x.size()[-1], 128, nb_classes).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.01)
loss_func = nn.CrossEntropyLoss()
train(model, optimizer, loss_func, adj_pt, x, labels, train_mask_real, val_mask, test_mask, batch_size=128, epochs = 200, iters_per_epoch=int(nb_train_real/128)+1, patience=10)
elif args.model == 'GFNN' or args.model == 'SGC':
x = preprocess_features(features)
x = torch.FloatTensor(x).to(device)
labels = mat_contents['Label']
labels = torch.LongTensor(labels-1).squeeze().to(device)
adj_pt, _, _ = normalize_trans(adj+sp.eye(adj.shape[0]))
adj_pt = np_sparse_to_pt_sparse(adj_pt).to(device)
x = torch.spmm(adj_pt, x)
x = torch.spmm(adj_pt, x)
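# Features are pre-propagated two hops (A_hat^2 X); GFNN then trains a one-hidden-layer MLP on them, while the SGC branch passes only the output dimension to MLP, matching SGC's linear-classifier formulation.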
if args.model == 'GFNN':
model = MLP(x.size()[-1], 128, nb_classes).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.01)
else:
model = MLP(x.size()[-1], out_dim = nb_classes).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_func = nn.CrossEntropyLoss()
train_mlp(model, optimizer, loss_func, x, labels, train_mask_real, val_mask, test_mask, batch_size=128, epochs = 200, iters_per_epoch=int(nb_train_real/128)+1, patience=10)
elif args.model == 'ATTR_RW_MF':
## ``Ours 1`` with (args.times_features == False)
## ``Ours 2`` with (args.times_features == True)
if args.saved == False:
# For the ablation study of local, non-local
if args.subsample:
features = subsample(adj, features)
trans_attr_rw, trans_deg_inv, vol = normalize_trans(concat_attr(adj, features))
rank_k = 300
window_size = 5
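# NetMF-style closed form: average the first window_size powers of the attribute-augmented transition matrix, rescale by D^(-1/2) on both sides and by the graph volume, then take an element-wise truncated log.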
'''
vals, vecs = LA.eigsh(trans_attr_rw, k=rank_k)
vals_power = [vals]
for i in range(window_size):
vals_power.append(vals_power[-1] * vals)
vals_power = sum(vals_power) / window_size
trans_power = vecs @ np.diag(vals_power) @ vecs.transpose()
'''
trans_attr_rw = trans_attr_rw.todense()
trans_power = [trans_attr_rw]
for i in range(window_size-1):
trans_power.append(trans_power[-1] @ trans_attr_rw)
trans_power = sum(trans_power) / window_size
mf = (trans_deg_inv @ trans_power @ trans_deg_inv * vol).real
mf = mf[:nb_nodes, :nb_nodes]
mf[mf<1] = 1
mf = np.log(mf)
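# mf now holds log(max(vol * D^-1 * mean(P^1..P^window_size) * D^-1, 1)),
# restricted above to the first nb_nodes rows/columns (the graph nodes): a
# non-negative, PMI-like similarity matrix that is cached to disk below and
# factorized with a truncated SVD on later runs (args.saved == True).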
np.save(args.dataset + '_attr_rw_win5_nosvd.npy', mf)
exit()
else:
mf = np.load(args.dataset + '_attr_rw_win5_nosvd.npy')
mf_sp = csc_matrix(mf)
u, s, vt = LA.svds(mf_sp, k=200)
mf_embed = u @ np.diag(s ** 0.5)
if args.times_features:
mf = mf[:nb_nodes, :nb_nodes] @ features
else:
mf = mf[:nb_nodes, :nb_nodes]
x = torch.FloatTensor(mf).to(device)
labels = mat_contents['Label']
labels = torch.LongTensor(labels-1).squeeze().to(device)
#model = MLP(x.size()[-1],
from __future__ import annotations
import io
from pathlib import Path
from typing import TYPE_CHECKING, BinaryIO, cast
import dask
import dask.array as da
import matplotlib.cm
import matplotlib.colors
import numpy as np
import xarray as xr
from dask.delayed import Delayed
from PIL import Image, ImageDraw, ImageFont
from typing_extensions import Literal
from xarray.plot.utils import _rescale_imshow_rgb
if TYPE_CHECKING:
import IPython.display
def _validate_arr_for_gif(
arr: xr.DataArray,
cmap: str | matplotlib.colors.Colormap | None,
date_format: str | None,
date_position: Literal["ul", "ur", "ll", "lr"],
) -> tuple[xr.DataArray, matplotlib.colors.Colormap | None]:
if arr.ndim not in (3, 4):
raise ValueError(
f"Array must only have the dimensions 'time', 'y', 'x', and optionally 'band', not {arr.dims!r}"
)
if arr.ndim == 3:
arr = arr.expand_dims("band", axis=1)
if arr.shape[1] not in (1, 3):
raise ValueError(f"Array must have 1 or 3 bands, not {arr.shape[1]}")
if arr.shape[1] == 1:
cmap = (
# this will use the default colormap (usually viridis) if it's None
matplotlib.cm.get_cmap(cmap)
if not isinstance(cmap, matplotlib.colors.Colormap)
else cmap
)
elif cmap is not None:
raise ValueError(
f"Colormaps are only possible on single-band data; this array has {arr.shape[1]} bands: "
f"{arr[arr.dims[1]].data.tolist()}"
)
if date_format:
time_coord = arr[arr.dims[0]]
try:
time_coord.dt.strftime
except (TypeError, AttributeError):
raise TypeError(
f"Coordinates for the {time_coord.name} dimension are not datetimes, or don't support `strftime`. "
"Set `date_format=False`"
)
assert date_position in (
"ul",
"ur",
"ll",
"lr",
), f"date_position must be one of ('ul', 'ur', 'll', 'lr'), not {date_position}."
return (arr, cmap)
def gif(
arr: xr.DataArray,
*,
to: str | Path | BinaryIO | None = None,
fps: int = 16,
robust: bool = True,
vmin: float | None = None,
vmax: float | None = None,
cmap: str | matplotlib.colors.Colormap | None = None,
date_format: str | None = "%Y-%m-%d",
date_position: Literal["ul", "ur", "ll", "lr"] = "ul",
date_color: tuple[int, int, int] = (255, 255, 255),
date_bg: tuple[int, int, int] | None = (0, 0, 0),
) -> IPython.display.Image | None:
"""
Render a `~xarray.DataArray` timestack (``time``, ``band``, ``y``, ``x``) into a GIF.
If the `~xarray.DataArray` contains a `dask.array.Array`, use `dgif` (delayed-GIF) instead.
The `~xarray.DataArray` must have 1 or 3 bands.
Unless ``date_format=None``, a small timestamp will be printed onto each frame of the animation.
You can control the position and styling of this with the ``date_position``, ``date_color``, and
``date_bg`` arguments.
Parameters
----------
arr:
Time-stacked array to animate. Must have 3 or 4 dimensions, which are assumed to be
in the order ``time``, [optional ``band``], ``y``, ``x``.
to:
Where to write the GIF. If None (default), an `IPython.display.Image` is returned,
which will display the GIF in your Jupyter notebook.
fps:
Frames per second
robust:
Calculate ``vmin`` and ``vmax`` from the 2nd and 98th percentiles of the data
(default True)
vmin:
Value in the data to map to 0 (black). If None (default), it's calculated
from the minimum value of the data or the 2nd percentile, depending on ``robust``.
vmax:
Value in the data to map to 255 (white). If None (default), it's calculated
from the maximum value of the data or the 98th percentile, depending on ``robust``.
cmap:
Colormap to use for single-band data. Can be a
:doc:`matplotlib colormap name <gallery/color/colormap_reference>` as a string,
or a `~matplotlib.colors.Colormap` object for custom colormapping.
If None (default), the default matplotlib colormap (usually ``viridis``) will automatically
be used for 1-band data. Setting a colormap for multi-band data is an error.
date_format:
Date format string (like ``"%Y-%m-%d"``, the default) used to format the timestamps
written onto each frame of the animation. If the coordinates for axis 0 of the
`~xarray.DataArray` are not timestamps or timedeltas, you must explicitly pass
``date_format=None``.
See the `Python string format doc
<https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior>`__
for details.
date_position:
Where to print the timestamp on each frame.
One of ``"ul"`` (upper-left), ``"ur"`` (upper-right), ``"ll"`` (lower-left),
``"lr"`` (lower-right), default ``"ul"``.
date_color:
Color for the timestamp font, as an RGB 3-tuple. Default: ``(255, 255, 255)``
(white).
date_bg:
Fill color to draw behind the timestamp (for legibility), as an RGB 3-tuple.
Default: ``(0, 0, 0)`` (black). Set to None to disable.
Returns
-------
IPython.display.Image or None
If ``to`` is None, returns an `IPython.display.Image`, which will display the
GIF in a Jupyter Notebook. (You can also get the GIF data as bytes from the Image's
``.data`` attribute.)
Otherwise, returns None, and the GIF data is written to ``to``.
Example
-------
>>> # Generate a GIF and show it in your notebook:
>>> stackstac.gif(arr, date_format="Year: %Y")
>>> # Write the GIF to a file, with no timestamp printed:
>>> stackstac.gif(arr, to="animation.gif", fps=24, date_format=None)
>>> # Show a colormapped GIF of single-band data in your notebook,
>>> # with the timestamp font in black and no background behind it:
>>> stackstac.gif(
... arr.sel(band="ndvi"), cmap="YlGn", date_color=(0, 0, 0), date_bg=None
... )
"""
if isinstance(arr.data, da.Array):
raise TypeError("DataArray contains delayed data; use `dgif` instead.")
arr, cmap = _validate_arr_for_gif(arr, cmap, date_format, date_position)
# Rescale
if arr.dtype.kind == "b":
data = arr.data.astype("uint8", copy=False)
else:
if not robust and vmin is None and vmax is None:
vmin = np.nanmin(arr)
vmax = np.nanmax(arr)
rescaled: xr.DataArray = _rescale_imshow_rgb(arr, vmin, vmax, robust)
data: np.ndarray = rescaled.data
# Colormap
if arr.shape[1] == 1:
assert isinstance(cmap, matplotlib.colors.Colormap)
data = data[:, 0]
data = cmap(data)
data = np.moveaxis(data, -1, -3) # colormap puts RGB last
# Convert to uint8
u8 = (data * 255).astype("uint8")
u8 = np.clip(u8, 0, 255, out=u8)
u8 = np.moveaxis(u8, -3, -1)
# Add alpha mask
if data.shape[1] == 4:
# colormap has already added the alpha band
frames = u8
else:
mask: np.ndarray = arr.isnull().data.any(axis=-3)
alpha = (~mask).astype("uint8", copy=False) * 255
frames = np.concatenate([u8, alpha[..., None]], axis=-1)
imgs = [Image.fromarray(frame) for frame in frames]
# Write timestamps onto each frame
if date_format:
time_coord = arr[arr.dims[0]]
labels = time_coord.dt.strftime(date_format).data
fnt = ImageFont.load_default()
for label, img in zip(labels, imgs):
# get a drawing context
d = ImageDraw.Draw(img)
d = cast(ImageDraw.ImageDraw, d)
width, height = img.size
t_width, t_height = fnt.getsize(label)
offset = 15
if date_position[0] == "u":
y = offset
else:
y = height - t_height - offset
if date_position[1] == "l":
x = offset
else:
x = width - t_width - offset
if date_bg:
d.rectangle((x, y, x + t_width, y + t_height), fill=date_bg)
# draw text
d.multiline_text((x, y), label, font=fnt, fill=date_color)
out = to if to is not None else io.BytesIO()
imgs[0].save(
out,
format="gif",
save_all=True,
append_images=imgs[1:],
duration=1 / fps * 1000, # ms
loop=False,
)
if to is None and isinstance(out, io.BytesIO):
# second `isinstance` is just for the typechecker
try:
import IPython.display
except ImportError:
raise ImportError(
"Cannot return an Image to display in a notebook, since IPython is not installed. "
"Pass a path or file to save the GIF to as the `to=` argument. "
"To get the GIF data as bytes, pass an instance of `io.BytesIO()`.\n"
"If this error is coming from your distributed cluster and you called `dgif`, "
"then IPython is not installed on your dask workers. Either install it, or "
"pass `dgif(arr, bytes=True)` to return the GIF as bytes. "
"Then use `IPython.display.Image(data=computed_bytes)` to show the image."
)
else:
return IPython.display.Image(data=out.getvalue())
def _gif(arr: xr.DataArray, bytes=False, **kwargs):
to = io.BytesIO() if bytes else None
out = gif(arr, to=to, **kwargs)
return to.getvalue() if bytes else out
_dgif = dask.delayed(_gif, pure=True)
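# Rough usage sketch (an assumption, not taken from this file): with a
# dask-backed DataArray ``stack`` one would typically write
#
#   img = dgif(stack, fps=12).compute()   # returns IPython.display.Image
#
# so frames are fetched and encoded where the data lives, and only the final
# GIF bytes travel back to the client.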
def dgif(
arr: xr.DataArray,
*,
bytes=False,
fps: int = 10,
robust: bool = True,
vmin: float | None = None,
vmax: float | None = None,
cmap: str | matplotlib.colors.Colormap | None = None,
date_format: str | None = "%Y-%m-%d",
date_position: Literal["ul", "ur", "ll", "lr"] = "ul",
date_color: tuple[int, int, int] = (255, 255, 255),
date_bg: tuple[int, int, int] | None = (0, 0, 0),
) -> Delayed:
"""
Turn a dask-backed `~xarray.DataArray` timestack into a GIF, as a `~dask.delayed.Delayed` object.
The `~xarray.DataArray` must have 1 or 3 bands, and dimensions in
(``time``, [optional ``band``], ``y``, ``x``) order.
If all you want is a GIF, `dgif` can be faster than calling ``.compute()`` and then `gif`:
since GIFs are smaller and reduced in quality from NumPy arrays, there's less data to
CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: <NAME>, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity values.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
Default: 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique'
Default: 'none'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
pre_trained_weights : bool, optional
Specifies whether to use the pre-trained weights from ImageNet data set
Default: False
pre_trained_weights_file : string, optional
Specifies the file name for the pre-trained weights.
Must be a fully qualified file name of SAS-compatible file (e.g., *.caffemodel.h5)
Note: Required when pre_trained_weights=True.
include_top : bool, optional
Specifies whether to include pre-trained weights of the top layers,
i.e. the last layer for classification.
Default: False.
Returns
-------
:class:`Sequential`
If `pre_trained_weights` is `False`
:class:`Model`
If `pre_trained_weights` is `True`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
if not pre_trained_weights:
model = Sequential(conn=conn, model_table=model_table)
model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets,
random_flip=random_flip, random_crop=random_crop))
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
# Residual block configuration.
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512),
(256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 4, 23, 3]
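# (3, 4, 23, 3) are the per-stage bottleneck block counts of ResNet-101; the
# first block of each stage uses a convolutional shortcut (strided for every
# stage except the first) so the residual path can change width/resolution.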
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if rep_num == 0:
conv_short_cut = True
if i == 0:
strides = 1
else:
strides = 2
else:
conv_short_cut = False
strides = 1
model.add(ResBlock_Caffe(kernel_sizes=kernel_sizes,
n_filters=n_filters, strides=strides,
batch_norm_first=batch_norm_first,
conv_short_cut=conv_short_cut))
# Bottom Layers
pooling_size = (width // 2 // 2 // 2 // 2 // 2, height // 2 // 2 // 2 // 2 // 2)
model.add(Pooling(width=pooling_size[0], height=pooling_size[1], pool='mean'))
model.add(OutputLayer(act='softmax', n=n_classes))
return model
else:
if pre_trained_weights_file is None:
raise DLPyError('\nThe pre-trained weights file is not specified.\n'
'Please follow the steps below to attach the pre-trained weights:\n'
'1. go to the website https://support.sas.com/documentation/prod-p/vdmml/zip/ '
'and download the associated weight file.\n'
'2. upload the *.h5 file to '
'a server side directory which the CAS session has access to.\n'
'3. specify the pre_trained_weights_file using the fully qualified server side path.')
model_cas = model_resnet101.ResNet101_Model( s=conn, model_table=model_table, n_channels=n_channels,
width=width, height=height, random_crop=random_crop,
offsets=offsets)
if include_top:
if n_classes != 1000:
warnings.warn('If include_top = True, n_classes will be set to 1000.', RuntimeWarning)
model = Model.from_table(model_cas)
model.load_weights(path=pre_trained_weights_file, labels=True)
return model
else:
model = Model.from_table(conn.CASTable(model_table), display_note=False)
model.load_weights(path=pre_trained_weights_file)
model._retrieve_('deeplearn.removelayer', model=model_table, name='fc1000')
model._retrieve_('deeplearn.addlayer', model=model_table, name='output',
layer=dict(type='output', n=n_classes, act='softmax'),
srcLayers=['pool5'])
weight_table_options = model.model_weights.to_table_params()
weight_table_options.update(dict(where='_LayerID_<244'))
model._retrieve_('table.partition', table=weight_table_options,
casout=dict(replace=True, **model.model_weights.to_table_params()))
model = Model.from_table(conn.CASTable(model_table))
return model
def ResNet152_SAS(conn, model_table='RESNET152_SAS', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=True, random_flip='none', random_crop='none', offsets=(103.939, 116.779, 123.68)):
'''
Generates a deep learning model with the SAS ResNet152 architecture.
Compared to Caffe ResNet152, the model prepends a batch normalization
layer to the last global pooling layer.
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: <NAME>, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: True
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the height of the input layer.
Default: 224
scale : double, optional
Specifies a scaling factor to be applied to each pixel intensity values.
Default: 1
random_flip : string, optional
Specifies how to flip the data in the input layer when image data is
used. Approximately half of the input data is subject to flipping.
Valid Values: 'h', 'hv', 'v', 'none'
Default: 'none'
random_crop : string, optional
Specifies how to crop the data in the input layer when image data is
used. Images are cropped to the values that are specified in the width
and height parameters. Only the images with one or both dimensions
that are larger than those sizes are cropped.
Valid Values: 'none', 'unique'
Default: 'none'
offsets : double or iter-of-doubles, optional
Specifies an offset for each channel in the input data. The final input
data is set after applying scaling and subtracting the specified offsets.
Default: (103.939, 116.779, 123.68)
Returns
-------
:class:`Sequential`
References
----------
https://arxiv.org/pdf/1512.03385.pdf
'''
conn.retrieve('loadactionset', _messagelevel='error', actionset='deeplearn')
model = Sequential(conn=conn, model_table=model_table)
model.add(InputLayer(n_channels=n_channels, width=width, height=height, scale=scale, offsets=offsets,
random_flip=random_flip, random_crop=random_crop))
# Top layers
model.add(Conv2d(64, 7, act='identity', include_bias=False, stride=2))
model.add(BN(act='relu'))
model.add(Pooling(width=3, stride=2))
kernel_sizes_list = [(1, 3, 1)] * 4
n_filters_list = [(64, 64, 256), (128, 128, 512), (256, 256, 1024), (512, 512, 2048)]
rep_nums_list = [3, 8, 36, 3]
for i in range(4):
kernel_sizes = kernel_sizes_list[i]
n_filters = n_filters_list[i]
for rep_num in range(rep_nums_list[i]):
if i == 0:
strides = 1
else:
if rep_num == 0:
strides = 2
else:
strides = 1
model.add(ResBlockBN(kernel_sizes=kernel_sizes, n_filters=n_filters,
strides=strides, batch_norm_first=batch_norm_first))
model.add(BN(act='relu'))
# Bottom Layers
pooling_size = (width // 2 // 2 // 2 // 2 // 2,
height // 2 // 2 // 2 // 2 // 2)
model.add(Pooling(width=pooling_size[0], height=pooling_size[1], pool='mean'))
model.add(OutputLayer(act='softmax', n=n_classes))
return model
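# Hedged usage sketch (host, port, and class count are placeholders, not from
# this file):
#
#   import swat
#   conn = swat.CAS('cas-host.example.com', 5570)
#   model = ResNet152_SAS(conn, n_classes=10)
#   model.print_summary()   # DLPy model summary table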
def ResNet152_Caffe(conn, model_table='RESNET152_CAFFE', n_classes=1000, n_channels=3, width=224, height=224, scale=1,
batch_norm_first=False, random_flip='none', random_crop='none', offsets=(103.939, 116.779, 123.68),
pre_trained_weights=False, pre_trained_weights_file=None, include_top=False):
'''
Generates a deep learning model with the ResNet152 architecture with convolution shortcut
Parameters
----------
conn : CAS
Specifies the CAS connection object.
model_table : string or dict or CAS table, optional
Specifies the CAS table to store the deep learning model.
batch_norm_first : bool, optional
Specifies whether to have batch normalization layer before the
convolution layer in the residual block. For a detailed discussion
about this, please refer to this paper: <NAME>, et al. "Identity
mappings in deep residual networks." European Conference on Computer
Vision. Springer International Publishing, 2016.
Default: False
n_classes : int, optional
Specifies the number of classes. If None is assigned, the model will
automatically detect the number of classes based on the training set.
Default: 1000
n_channels : int, optional
Specifies the number of the channels (i.e., depth) of the input layer.
Default: 3
width : int, optional
Specifies the width of the input layer.
Default: 224
height : int, optional
Specifies the width of the input layer.
# google-cloud-sdk/lib/googlecloudsdk/third_party/apis/recommender/v1alpha2/recommender_v1alpha2_client.py
"""Generated client library for recommender version v1alpha2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.recommender.v1alpha2 import recommender_v1alpha2_messages as messages
class RecommenderV1alpha2(base_api.BaseApiClient):
"""Generated client library for service recommender version v1alpha2."""
MESSAGES_MODULE = messages
BASE_URL = u'https://recommender.googleapis.com/'
_PACKAGE = u'recommender'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform']
_VERSION = u'v1alpha2'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'RecommenderV1alpha2'
_URL_VERSION = u'v1alpha2'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new recommender handle."""
url = url or self.BASE_URL
super(RecommenderV1alpha2, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.billingAccounts_locations_insightTypes_insights = self.BillingAccountsLocationsInsightTypesInsightsService(self)
self.billingAccounts_locations_insightTypes = self.BillingAccountsLocationsInsightTypesService(self)
self.billingAccounts_locations_recommenders_recommendations = self.BillingAccountsLocationsRecommendersRecommendationsService(self)
self.billingAccounts_locations_recommenders = self.BillingAccountsLocationsRecommendersService(self)
self.billingAccounts_locations = self.BillingAccountsLocationsService(self)
self.billingAccounts = self.BillingAccountsService(self)
self.projects_locations_insightTypes_insights = self.ProjectsLocationsInsightTypesInsightsService(self)
self.projects_locations_insightTypes = self.ProjectsLocationsInsightTypesService(self)
self.projects_locations_recommenders_recommendations = self.ProjectsLocationsRecommendersRecommendationsService(self)
self.projects_locations_recommenders = self.ProjectsLocationsRecommendersService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
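# Illustrative, hand-written usage sketch (an assumption; in the Cloud SDK this
# client is normally obtained via apis.GetClientInstance rather than built here):
#
#   client = RecommenderV1alpha2()
#   req = messages.RecommenderProjectsLocationsRecommendersRecommendationsListRequest(
#       parent='projects/PROJECT/locations/LOCATION/recommenders/RECOMMENDER_ID')
#   resp = client.projects_locations_recommenders_recommendations.List(req)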
class BillingAccountsLocationsInsightTypesInsightsService(base_api.BaseApiService):
"""Service class for the billingAccounts_locations_insightTypes_insights resource."""
_NAME = u'billingAccounts_locations_insightTypes_insights'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsLocationsInsightTypesInsightsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the requested insight. Requires the recommender.*.get IAM permission.
for the specified insight type.
Args:
request: (RecommenderBillingAccountsLocationsInsightTypesInsightsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Insight) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/insightTypes/{insightTypesId}/insights/{insightsId}',
http_method=u'GET',
method_id=u'recommender.billingAccounts.locations.insightTypes.insights.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}',
request_field='',
request_type_name=u'RecommenderBillingAccountsLocationsInsightTypesInsightsGetRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Insight',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists insights for a Cloud project. Requires the recommender.*.list IAM.
permission for the specified insight type.
Args:
request: (RecommenderBillingAccountsLocationsInsightTypesInsightsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2ListInsightsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/insightTypes/{insightTypesId}/insights',
http_method=u'GET',
method_id=u'recommender.billingAccounts.locations.insightTypes.insights.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1alpha2/{+parent}/insights',
request_field='',
request_type_name=u'RecommenderBillingAccountsLocationsInsightTypesInsightsListRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2ListInsightsResponse',
supports_download=False,
)
def MarkAccepted(self, request, global_params=None):
r"""Marks the Insight State as Accepted. Users can use this method to.
indicate to the Recommender API that they have applied some action based
on the insight. This stops the insight content from being updated.
MarkInsightAccepted can be applied to insights in ACTIVE state. Requires
the recommender.*.update IAM permission for the specified insight.
Args:
request: (RecommenderBillingAccountsLocationsInsightTypesInsightsMarkAcceptedRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Insight) The response message.
"""
config = self.GetMethodConfig('MarkAccepted')
return self._RunMethod(
config, request, global_params=global_params)
MarkAccepted.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/insightTypes/{insightTypesId}/insights/{insightsId}:markAccepted',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.insightTypes.insights.markAccepted',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markAccepted',
request_field=u'googleCloudRecommenderV1alpha2MarkInsightAcceptedRequest',
request_type_name=u'RecommenderBillingAccountsLocationsInsightTypesInsightsMarkAcceptedRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Insight',
supports_download=False,
)
def MarkActive(self, request, global_params=None):
r"""Mark the Insight State as Active. Users can use this method to.
indicate to the Recommender API that a DISMISSED insight has to
be marked back as ACTIVE.
MarkInsightActive can be applied to insights in DISMISSED state. Requires
the recommender.*.update IAM permission for the specified insight type.
Args:
request: (RecommenderBillingAccountsLocationsInsightTypesInsightsMarkActiveRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Insight) The response message.
"""
config = self.GetMethodConfig('MarkActive')
return self._RunMethod(
config, request, global_params=global_params)
MarkActive.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/insightTypes/{insightTypesId}/insights/{insightsId}:markActive',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.insightTypes.insights.markActive',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markActive',
request_field=u'googleCloudRecommenderV1alpha2MarkInsightActiveRequest',
request_type_name=u'RecommenderBillingAccountsLocationsInsightTypesInsightsMarkActiveRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Insight',
supports_download=False,
)
def MarkDismissed(self, request, global_params=None):
r"""Mark the Insight State as Dismissed. Users can use this method to.
indicate to the Recommender API that an ACTIVE insight should be dismissed.
MarkInsightDismissed can be applied to insights in ACTIVE state. Requires
the recommender.*.update IAM permission for the specified insight type.
Args:
request: (RecommenderBillingAccountsLocationsInsightTypesInsightsMarkDismissedRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Insight) The response message.
"""
config = self.GetMethodConfig('MarkDismissed')
return self._RunMethod(
config, request, global_params=global_params)
MarkDismissed.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/insightTypes/{insightTypesId}/insights/{insightsId}:markDismissed',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.insightTypes.insights.markDismissed',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markDismissed',
request_field=u'googleCloudRecommenderV1alpha2MarkInsightDismissedRequest',
request_type_name=u'RecommenderBillingAccountsLocationsInsightTypesInsightsMarkDismissedRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Insight',
supports_download=False,
)
class BillingAccountsLocationsInsightTypesService(base_api.BaseApiService):
"""Service class for the billingAccounts_locations_insightTypes resource."""
_NAME = u'billingAccounts_locations_insightTypes'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsLocationsInsightTypesService, self).__init__(client)
self._upload_configs = {
}
class BillingAccountsLocationsRecommendersRecommendationsService(base_api.BaseApiService):
"""Service class for the billingAccounts_locations_recommenders_recommendations resource."""
_NAME = u'billingAccounts_locations_recommenders_recommendations'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsLocationsRecommendersRecommendationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the requested recommendation. Requires the recommender.*.get.
IAM permission for the specified recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}',
http_method=u'GET',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}',
request_field='',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsGetRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists recommendations for a Cloud project. Requires the recommender.*.list.
IAM permission for the specified recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2ListRecommendationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations',
http_method=u'GET',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'filter', u'pageSize', u'pageToken'],
relative_path=u'v1alpha2/{+parent}/recommendations',
request_field='',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsListRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2ListRecommendationsResponse',
supports_download=False,
)
def MarkActive(self, request, global_params=None):
r"""Mark the Recommendation State as Active. Users can use this method to.
indicate to the Recommender API that a DISMISSED recommendation has to
be marked back as ACTIVE.
MarkRecommendationActive can be applied to recommendations in DISMISSED
state.
Requires the recommender.*.update IAM permission for the specified
recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkActiveRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('MarkActive')
return self._RunMethod(
config, request, global_params=global_params)
MarkActive.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}:markActive',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.markActive',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markActive',
request_field=u'googleCloudRecommenderV1alpha2MarkRecommendationActiveRequest',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkActiveRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
def MarkClaimed(self, request, global_params=None):
r"""Marks the Recommendation State as Claimed. Users can use this method to.
indicate to the Recommender API that they are starting to apply the
recommendation themselves. This stops the recommendation content from being
updated. Associated insights are frozen and placed in the ACCEPTED state.
MarkRecommendationClaimed can be applied to recommendations in CLAIMED or
ACTIVE state.
Requires the recommender.*.update IAM permission for the specified
recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkClaimedRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('MarkClaimed')
return self._RunMethod(
config, request, global_params=global_params)
MarkClaimed.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}:markClaimed',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.markClaimed',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markClaimed',
request_field=u'googleCloudRecommenderV1alpha2MarkRecommendationClaimedRequest',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkClaimedRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
def MarkDismissed(self, request, global_params=None):
r"""Mark the Recommendation State as Dismissed. Users can use this method to.
indicate to the Recommender API that an ACTIVE recommendation has to
be marked back as DISMISSED.
MarkRecommendationDismissed can be applied to recommendations in ACTIVE
state.
Requires the recommender.*.update IAM permission for the specified
recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkDismissedRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('MarkDismissed')
return self._RunMethod(
config, request, global_params=global_params)
MarkDismissed.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}:markDismissed',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.markDismissed',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markDismissed',
request_field=u'googleCloudRecommenderV1alpha2MarkRecommendationDismissedRequest',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkDismissedRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
def MarkFailed(self, request, global_params=None):
r"""Marks the Recommendation State as Failed. Users can use this method to.
indicate to the Recommender API that they have applied the recommendation
themselves, and the operation failed. This stops the recommendation content
from being updated. Associated insights are frozen and placed in the
ACCEPTED state.
MarkRecommendationFailed can be applied to recommendations in ACTIVE,
CLAIMED, SUCCEEDED, or FAILED state.
Requires the recommender.*.update IAM permission for the specified
recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkFailedRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('MarkFailed')
return self._RunMethod(
config, request, global_params=global_params)
MarkFailed.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}:markFailed',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.markFailed',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markFailed',
request_field=u'googleCloudRecommenderV1alpha2MarkRecommendationFailedRequest',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkFailedRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
def MarkSucceeded(self, request, global_params=None):
r"""Marks the Recommendation State as Succeeded. Users can use this method to.
indicate to the Recommender API that they have applied the recommendation
themselves, and the operation was successful. This stops the recommendation
content from being updated. Associated insights are frozen and placed in
the ACCEPTED state.
MarkRecommendationSucceeded can be applied to recommendations in ACTIVE,
CLAIMED, SUCCEEDED, or FAILED state.
Requires the recommender.*.update IAM permission for the specified
recommender.
Args:
request: (RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkSucceededRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecommenderV1alpha2Recommendation) The response message.
"""
config = self.GetMethodConfig('MarkSucceeded')
return self._RunMethod(
config, request, global_params=global_params)
MarkSucceeded.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha2/billingAccounts/{billingAccountsId}/locations/{locationsId}/recommenders/{recommendersId}/recommendations/{recommendationsId}:markSucceeded',
http_method=u'POST',
method_id=u'recommender.billingAccounts.locations.recommenders.recommendations.markSucceeded',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha2/{+name}:markSucceeded',
request_field=u'googleCloudRecommenderV1alpha2MarkRecommendationSucceededRequest',
request_type_name=u'RecommenderBillingAccountsLocationsRecommendersRecommendationsMarkSucceededRequest',
response_type_name=u'GoogleCloudRecommenderV1alpha2Recommendation',
supports_download=False,
)
class BillingAccountsLocationsRecommendersService(base_api.BaseApiService):
"""Service class for the billingAccounts_locations_recommenders resource."""
_NAME = u'billingAccounts_locations_recommenders'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsLocationsRecommendersService, self).__init__(client)
self._upload_configs = {
}
class BillingAccountsLocationsService(base_api.BaseApiService):
"""Service class for the billingAccounts_locations resource."""
_NAME = u'billingAccounts_locations'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsLocationsService, self).__init__(client)
self._upload_configs = {
}
class BillingAccountsService(base_api.BaseApiService):
"""Service class for the billingAccounts resource."""
_NAME = u'billingAccounts'
def __init__(self, client):
super(RecommenderV1alpha2.BillingAccountsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsLocationsInsightTypesInsightsService(base_api.BaseApiService):
"""Service class for the projects_locations_insightTypes_insights resource."""
_NAME = u'projects_locations_insightTypes_insights'
def __init__(self, client):
super(RecommenderV1alpha2.ProjectsLocationsInsightTypesInsightsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the requested insight. Requires the recommender.*.get | |
"""Core implementation of path-based import.
This module is NOT meant to be directly imported! It has been designed such
that it can be bootstrapped into Python as the implementation of import. As
such it requires the injection of specific modules and attributes in order to
work. One should use importlib as the public-facing version of this module.
"""
# IMPORTANT: Whenever making changes to this module, be sure to run a top-level
# `make regen-importlib` followed by `make` in order to get the frozen version
# of the module updated. Not doing so will result in the Makefile to fail for
# all others who don't have a ./python around to freeze the module in the early
# stages of compilation.
#
# See importlib._setup() for what is injected into the global namespace.
# When editing this code be aware that code executed at import time CANNOT
# reference any injected objects! This includes not only global code but also
# anything specified at the class level.
# Module injected manually by _set_bootstrap_module()
_bootstrap = None
# Import builtin modules
import _imp
import _io
import sys
import _warnings
import marshal
_MS_WINDOWS = (sys.platform == 'win32')
if _MS_WINDOWS:
import nt as _os
import winreg
else:
import posix as _os
if _MS_WINDOWS:
path_separators = ['\\', '/']
else:
path_separators = ['/']
# Assumption made in _path_join()
assert all(len(sep) == 1 for sep in path_separators)
path_sep = path_separators[0]
path_separators = ''.join(path_separators)
_pathseps_with_colon = {f':{s}' for s in path_separators}
# Bootstrap-related code ######################################################
_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win',
_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin'
_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY
+ _CASE_INSENSITIVE_PLATFORMS_STR_KEY)
def _make_relax_case():
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS):
if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY):
key = 'PYTHONCASEOK'
else:
key = b'PYTHONCASEOK'
def _relax_case():
"""True if filenames must be checked case-insensitively and ignore environment flags are not set."""
return not sys.flags.ignore_environment and key in _os.environ
else:
def _relax_case():
"""True if filenames must be checked case-insensitively."""
return False
return _relax_case
_relax_case = _make_relax_case()
def _pack_uint32(x):
"""Convert a 32-bit integer to little-endian."""
return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little')
def _unpack_uint32(data):
"""Convert 4 bytes in little-endian to an integer."""
assert len(data) == 4
return int.from_bytes(data, 'little')
def _unpack_uint16(data):
"""Convert 2 bytes in little-endian to an integer."""
assert len(data) == 2
return int.from_bytes(data, 'little')
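# For illustration, values that can be checked in a REPL:
#   _pack_uint32(1)                       -> b'\x01\x00\x00\x00'
#   _unpack_uint32(b'\x01\x00\x00\x00')   -> 1
#   _unpack_uint16(b'\x34\x12')           -> 0x1234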
def _path_join(*path_parts):
"""Replacement for os.path.join()."""
return path_sep.join([part.rstrip(path_separators)
for part in path_parts if part])
def _path_split(path):
"""Replacement for os.path.split()."""
if len(path_separators) == 1:
front, _, tail = path.rpartition(path_sep)
return front, tail
for x in reversed(path):
if x in path_separators:
front, tail = path.rsplit(x, maxsplit=1)
return front, tail
return '', path
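# On a POSIX build (path_sep == '/') these behave like their os.path
# counterparts, for example:
#   _path_join('a/', 'b', 'c')   -> 'a/b/c'
#   _path_split('a/b/c.py')      -> ('a/b', 'c.py')
#   _path_split('setup.py')      -> ('', 'setup.py')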
def _path_stat(path):
"""Stat the path.
Made a separate function to make it easier to override in experiments
(e.g. cache stat results).
"""
return _os.stat(path)
def _path_is_mode_type(path, mode):
"""Test whether the path is the specified mode type."""
try:
stat_info = _path_stat(path)
except OSError:
return False
return (stat_info.st_mode & 0o170000) == mode
def _path_isfile(path):
"""Replacement for os.path.isfile."""
return _path_is_mode_type(path, 0o100000)
def _path_isdir(path):
"""Replacement for os.path.isdir."""
if not path:
path = _os.getcwd()
return _path_is_mode_type(path, 0o040000)
def _path_isabs(path):
"""Replacement for os.path.isabs.
Considers a Windows drive-relative path (no drive, but starts with slash) to
still be "absolute".
"""
return path.startswith(path_separators) or path[1:3] in _pathseps_with_colon
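# Examples of the "drive-relative counts as absolute" rule documented above:
#   _path_isabs('/usr/lib')   -> True on POSIX (leading path separator)
#   _path_isabs('usr/lib')    -> False
#   _path_isabs('C:\\temp')   -> True on Windows, because path[1:3] == ':\\'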
def _write_atomic(path, data, mode=0o666):
"""Best-effort function to write data to a path atomically.
Be prepared to handle a FileExistsError if concurrent writing of the
temporary file is attempted."""
# id() is used to generate a pseudo-random filename.
path_tmp = '{}.{}'.format(path, id(path))
fd = _os.open(path_tmp,
_os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666)
try:
# We first write data to a temporary file, and then use os.replace() to
# perform an atomic rename.
with _io.FileIO(fd, 'wb') as file:
file.write(data)
_os.replace(path_tmp, path)
except OSError:
try:
_os.unlink(path_tmp)
except OSError:
pass
raise
_code_type = type(_write_atomic.__code__)
# Finder/loader utility code ###############################################
# Magic word to reject .pyc files generated by other Python versions.
# It should change for each incompatible change to the bytecode.
#
# The value of CR and LF is incorporated so if you ever read or write
# a .pyc file in text mode the magic number will be wrong; also, the
# Apple MPW compiler swaps their values, botching string constants.
#
# There were a variety of old schemes for setting the magic number.
# The current working scheme is to increment the previous value by
# 10.
#
# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic
# number also includes a new "magic tag", i.e. a human readable string used
# to represent the magic number in __pycache__ directories. When you change
# the magic number, you must also set a new unique magic tag. Generally this
# can be named after the Python major version of the magic number bump, but
# it can really be anything, as long as it's different than anything else
# that's come before. The tags are included in the following table, starting
# with Python 3.2a0.
#
# Known values:
# Python 1.5: 20121
# Python 1.5.1: 20121
# Python 1.5.2: 20121
# Python 1.6: 50428
# Python 2.0: 50823
# Python 2.0.1: 50823
# Python 2.1: 60202
# Python 2.1.1: 60202
# Python 2.1.2: 60202
# Python 2.2: 60717
# Python 2.3a0: 62011
# Python 2.3a0: 62021
# Python 2.3a0: 62011 (!)
# Python 2.4a0: 62041
# Python 2.4a3: 62051
# Python 2.4b1: 62061
# Python 2.5a0: 62071
# Python 2.5a0: 62081 (ast-branch)
# Python 2.5a0: 62091 (with)
# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode)
# Python 2.5b3: 62101 (fix wrong code: for x, in ...)
# Python 2.5b3: 62111 (fix wrong code: x += yield)
# Python 2.5c1: 62121 (fix wrong lnotab with for loops and
# storing constants that should have been removed)
# Python 2.5c2: 62131 (fix wrong code: for x, in ... in listcomp/genexp)
# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode)
# Python 2.6a1: 62161 (WITH_CLEANUP optimization)
# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND)
# Python 2.7a0: 62181 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE)
# Python 2.7a0 62191 (introduce SETUP_WITH)
# Python 2.7a0 62201 (introduce BUILD_SET)
# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD)
# Python 3000: 3000
# 3010 (removed UNARY_CONVERT)
# 3020 (added BUILD_SET)
# 3030 (added keyword-only parameters)
# 3040 (added signature annotations)
# 3050 (print becomes a function)
# 3060 (PEP 3115 metaclass syntax)
# 3061 (string literals become unicode)
# 3071 (PEP 3109 raise changes)
# 3081 (PEP 3137 make __file__ and __name__ unicode)
# 3091 (kill str8 interning)
# 3101 (merge from 2.6a0, see 62151)
# 3103 (__file__ points to source file)
# Python 3.0a4: 3111 (WITH_CLEANUP optimization).
# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT
#3021)
# Python 3.1a1: 3141 (optimize list, set and dict comprehensions:
# change LIST_APPEND and SET_ADD, add MAP_ADD #2183)
# Python 3.1a1: 3151 (optimize conditional branches:
# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE
#4715)
# Python 3.2a1: 3160 (add SETUP_WITH #6101)
# tag: cpython-32
# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225)
# tag: cpython-32
# Python 3.2a3 3180 (add DELETE_DEREF #4617)
# Python 3.3a1 3190 (__class__ super closure changed)
# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448)
# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645)
# Python 3.3a2 3220 (changed PEP 380 implementation #14230)
# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857)
# Python 3.4a1 3250 (evaluate positional default arguments before
# keyword-only defaults #16967)
# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override
# free vars #17853)
# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370)
# Python 3.4a1 3280 (remove implicit class argument)
# Python 3.4a4 3290 (changes to __qualname__ computation #19301)
# Python 3.4a4 3300 (more changes to __qualname__ computation #19301)
# Python 3.4rc2 3310 (alter __qualname__ computation #20625)
# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176)
# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292)
# Python 3.5b2 3340 (fix dictionary display evaluation order #11205)
# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400)
# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286)
# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483)
# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107)
# Python 3.6a2 3370 (16 bit wordcode #26647)
# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140)
# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE
# #27095)
# Python 3.6b1 3373 (add BUILD_STRING opcode #27078)
# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes
# #27985)
# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL
#27213)
# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722)
# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257)
# Python 3.6rc1 3379 (more thorough __class__ validation #23722)
# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110)
# Python 3.7a2 3391 (update GET_AITER #31709)
# Python 3.7a4 3392 (PEP 552: Deterministic pycs
C.f)
a = C()
a.f = 100
self.assertEqual(a.f, 100)
self.assertEqual(a.calls, 0)
del a.f
with self.assertRaises(AttributeError):
del a.f
self.assertEqual(a.f, 42)
self.assertEqual(a.calls, 1)
def test_cached_property_slot_subtype(self):
class C:
__slots__ = ("f",)
def f(self):
return 42
class my_cached_prop(cached_property):
pass
with self.assertRaises(TypeError):
C.f = my_cached_prop(f, C.f)
def test_cached_property_slot_raises(self):
class C:
__slots__ = ("f",)
def f(self):
raise NoWayError()
C.f = cached_property(f, C.f)
with self.assertRaises(NoWayError):
C().f
def test_cached_property_slot_wrong_type(self):
"""apply a cached property from one type to another"""
class C:
__slots__ = ("abc",)
class D:
pass
D.abc = cached_property(lambda self: 42, C.abc)
a = D()
with self.assertRaises(TypeError):
x = a.abc
def test_cached_property_slot_wrong_type_set(self):
"""apply a cached property from one type to another"""
class C:
__slots__ = ("abc",)
class D:
pass
D.abc = cached_property(lambda self: 42, C.abc)
a = D()
with self.assertRaises(TypeError):
print(a.abc)
def test_cached_property_slot_name(self):
class C:
__slots__ = ("f",)
C.f = cached_property(lambda self: 42, C.f)
self.assertEqual(C.f.name, "f")
def test_cached_property_slot_property(self):
class C:
__slots__ = ("f",)
prev_f = C.f
C.f = cached_property(lambda self: 42, C.f)
self.assertEqual(C.f.slot, prev_f)
def test_cached_property_no_slot_property(self):
class C:
@cached_property
def f(self):
return 42
self.assertEqual(C.f.slot, None)
def test_cached_property_non_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, 42)
def test_cached_property_incompatible_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, GeneratorType.gi_frame)
def test_cached_property_readonly_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, range.start)
def test_warn_on_type(self):
class C:
pass
msg = type = attr = None
def cb(*args):
nonlocal msg, type, attr
msg = args[0]
type = args[1]
attr = args[2]
cinder.warn_on_inst_dict(C)
cinder.freeze_type(C)
cinder.cinder_set_warn_handler(cb)
C.foo = 42
self.assertEqual(
msg, "WARN002: Type modified that was flagged for immutability"
)
self.assertEqual(type, C)
self.assertEqual(attr, "foo")
def test_get_warn(self):
class C:
pass
def cb(*args):
pass
cinder.set_warn_handler(cb)
self.assertEqual(cinder.get_warn_handler(), cb)
cinder.set_warn_handler(None)
self.assertEqual(cinder.get_warn_handler(), None)
def test_warn_on_frozen_type(self):
class C:
pass
cinder.freeze_type(C)
with self.assertRaisesRegex(
TypeError, "can't call warn_on_inst_dict on a frozen type"
):
cinder.warn_on_inst_dict(C)
def test_gen_free_list(self):
knobs = cinder.getknobs()
self.assertEqual(knobs["genfreelist"], False)
cinder.setknobs({"genfreelist": True})
knobs = cinder.getknobs()
self.assertEqual(knobs["genfreelist"], True)
def f():
yield 42
a = f()
id1 = id(a)
del a
a = f()
id2 = id(a)
self.assertEqual(id1, id2)
cinder.setknobs({"genfreelist": False})
knobs = cinder.getknobs()
self.assertEqual(knobs["genfreelist"], False)
# def test_polymorphic_cache(self):
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], False)
# cinder.setknobs({"polymorphiccache": True})
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], True)
# cinder.setknobs({"polymorphiccache": False})
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], False)
def test_strictmodule_type(self):
foo = strict_module_from_module(ModuleType("foo"))
self.assertTrue(type(foo) is StrictModule)
def test_strictmodule_uninitialized(self):
# An uninitialized module has no __dict__ or __name__,
# and __doc__ is None
foo = StrictModule.__new__(StrictModule)
self.assertTrue(foo.__dict__ == None)
self.assertRaises(SystemError, dir, foo)
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, StrictModule.__doc__)
def test_strictmodule_uninitialized_missing_getattr(self):
foo = StrictModule.__new__(StrictModule)
self.assertRaisesRegex(
AttributeError,
"module has no attribute 'not_here'",
getattr,
foo,
"not_here",
)
def test_strictmodule_missing_getattr(self):
foo = strict_module_from_module(ModuleType("foo"))
self.assertRaisesRegex(
AttributeError,
"module 'foo' has no attribute 'not_here'",
getattr,
foo,
"not_here",
)
def test_strictmodule_no_docstring(self):
# Regularly initialized module, no docstring
foo = strict_module_from_module(ModuleType("foo"))
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, None)
self.assertIs(foo.__loader__, None)
self.assertIs(foo.__package__, None)
self.assertIs(foo.__spec__, None)
self.assertEqual(
foo.__dict__,
{
"__name__": "foo",
"__doc__": None,
"__loader__": None,
"__package__": None,
"__spec__": None,
},
)
def test_strictmodule_ascii_docstring(self):
# ASCII docstring
foo = strict_module_from_module(ModuleType("foo", "foodoc"))
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc")
self.assertEqual(
foo.__dict__,
{
"__name__": "foo",
"__doc__": "foodoc",
"__loader__": None,
"__package__": None,
"__spec__": None,
},
)
def test_strictmodule_unicode_docstring(self):
# Unicode docstring
foo = strict_module_from_module(ModuleType("foo", "foodoc\u1234"))
self.assertEqual(foo.__name__, "foo")
self.assertEqual(foo.__doc__, "foodoc\u1234")
self.assertEqual(
foo.__dict__,
{
"__name__": "foo",
"__doc__": "foodoc\u1234",
"__loader__": None,
"__package__": None,
"__spec__": None,
},
)
def test_strictmodule_weakref(self):
m = strict_module_from_module(ModuleType("foo"))
wr = weakref.ref(m)
self.assertIs(wr(), m)
del m
gc_collect()
self.assertIs(wr(), None)
def test_strictmodule_getattr(self):
foo = create_strict_module(x=1)
self.assertEqual(foo.x, 1)
def test_strictmodule_setattr(self):
foo = create_strict_module(x=1)
with self.assertRaises(AttributeError):
foo.x = 2
def test_strictmodule_delattr(self):
foo = create_strict_module(x=1)
with self.assertRaises(AttributeError):
del foo.x
def test_strictmodule_setattr_with_patch_enabled(self):
foo = create_strict_module(x=1, enable_patching=True)
with self.assertRaises(AttributeError):
foo.x = 2
def test_strictmodule_patch_disabled(self):
foo = create_strict_module(x=1)
with self.assertRaises(AttributeError):
strict_module_patch(foo, "x", 2)
def test_strictmodule_patch_enabled(self):
foo = create_strict_module(x=1, enable_patching=True)
strict_module_patch(foo, "x", 2)
self.assertEqual(foo.x, 2)
    def test_strictmodule_patch_enabled_1(self):
foo = strict_module_from_module(ModuleType("a"), enable_patching=True)
strict_module_patch(foo, "__dir__", 2)
self.assertEqual(foo.__dir__, 2)
def test_strictmodule_patch_enabled_2(self):
m = ModuleType("a")
d = m.__dict__
foo = StrictModule(m.__dict__, False)
d["__dir__"] = 2
self.assertEqual(foo.__dir__, 2)
def test_strictmodule_getattr_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
bga = strict_module_from_module(bga)
bad_getattr2 = strict_module_from_module(bad_getattr2)
self.assertEqual(bga.x, 1)
self.assertEqual(bad_getattr2.x, 1)
# we are not respecting module __getattr__ here
with self.assertRaises(TypeError):
bga.nope
with self.assertRaises(TypeError):
bad_getattr2.nope
del sys.modules["test.bad_getattr"]
if "test.bad_getattr2" in sys.modules:
del sys.modules["test.bad_getattr2"]
def test_strictmodule_dir(self):
import test.good_getattr as gga
gga = strict_module_from_module(gga)
self.assertEqual(dir(gga), ["a", "b", "c"])
del sys.modules["test.good_getattr"]
def test_strictmodule_dir_errors(self):
import test.bad_getattr as bga
from test import bad_getattr2
bga = strict_module_from_module(bga)
bad_getattr2 = strict_module_from_module(bad_getattr2)
with self.assertRaises(TypeError):
dir(bga)
with self.assertRaises(TypeError):
dir(bad_getattr2)
del sys.modules["test.bad_getattr"]
if "test.bad_getattr2" in sys.modules:
del sys.modules["test.bad_getattr2"]
def test_strictmodule_getattr_tricky(self):
from test import bad_getattr3
bad_getattr3 = strict_module_from_module(bad_getattr3)
# these lookups should not crash
with self.assertRaises(AttributeError):
bad_getattr3.one
with self.assertRaises(AttributeError):
bad_getattr3.delgetattr
if "test.bad_getattr3" in sys.modules:
del sys.modules["test.bad_getattr3"]
def test_strictmodule_repr_minimal(self):
# reprs when modules have no __file__, __name__, or __loader__
m = ModuleType("foo")
del m.__name__
m = strict_module_from_module(m)
self.assertEqual(repr(m), "<module '?'>")
def test_strictmodule_repr_with_name(self):
m = ModuleType("foo")
m = strict_module_from_module(m)
self.assertEqual(repr(m), "<module 'foo'>")
def test_strictmodule_repr_with_name_and_filename(self):
m = ModuleType("foo")
m.__file__ = "/tmp/foo.py"
m = strict_module_from_module(m)
self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
def test_strictmodule_repr_with_filename_only(self):
m = ModuleType("foo")
del m.__name__
m.__file__ = "/tmp/foo.py"
m = strict_module_from_module(m)
self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
def test_strictmodule_modify_dict_patch_disabled(self):
foo = create_strict_module(x=1, enable_patching=False)
foo.__dict__["x"] = 2
self.assertEqual(foo.x, 1)
def test_strictmodule_modify_dict_patch_enabled(self):
foo = create_strict_module(x=1, enable_patching=True)
foo.__dict__["x"] = 2
self.assertEqual(foo.x, 1)
def test_strictmodule_unassigned_field(self):
d = {"<assigned:x>": False, "x": 1}
foo = StrictModule(d, False)
self.assertNotIn("x", foo.__dict__)
def test_const_object(self):
class MyObj:
magic = [42]
a = MyObj
b = const(a)
c = [b]
self.assertEqual(c[0], b)
self.assertEqual(b.magic, const([42]))
d = c[0]
e = d.magic
with self.assertRaises(AttributeError):
e.magic = 33
def async_test(f):
assert inspect.iscoroutinefunction(f)
@wraps(f)
def impl(*args, **kwargs):
asyncio.run(f(*args, **kwargs))
return impl
class AsyncCinderTest(unittest.TestCase):
def setUp(self) -> None:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.loop = loop
def tearDown(self):
self.loop.close()
asyncio.set_event_loop_policy(None)
@async_test
async def test_cached_property(self):
class C:
def __init__(self):
self.calls = 0
@async_cached_property
async def f(self):
self.calls += 1
return 42
a = C()
self.assertEqual(await a.f, 42)
self.assertEqual(a.calls, 1)
self.assertEqual(await a.f, 42)
self.assertEqual(a.calls, 1)
@async_test
async def test_cached_property_loop(self):
val = object()
class C:
@async_cached_property
async def f(self):
return val
a = C()
for i in range(1000):
x = await a.f
self.assertEqual(x, val)
@async_test
async def test_cached_property_raises(self):
class C:
@async_cached_property
async def f(self):
raise NoWayError()
with self.assertRaises(NoWayError):
await C().f
@async_test
async def test_cached_property_no_dict(self):
class C:
__slots__ = ()
@async_cached_property
async def f(self):
return 42
with self.assertRaises(AttributeError):
a = await C().f
@async_test
async def test_cached_property_name(self):
class C:
@async_cached_property
async def f(self):
return 42
self.assertEqual(C.f.name, "f")
@async_test
async def test_cached_property_func(self):
class C:
pass
async def f(self):
return 42
C.f = async_cached_property(f)
self.assertEqual(C.f.func, f)
@async_test
async def test_cached_property_doc(self):
class C:
@async_cached_property
async def f(self):
return 42
self.assertEqual(C.f.__doc__, None)
class D:
@async_cached_property
async def f(self):
"hi there"
return 42
self.assertEqual(D.f.__doc__, "hi there")
D.f.func.__doc__ = "updated"
self.assertEqual(D.f.__doc__, "updated")
@async_test
async def test_cached_property_slot(self):
class C:
__slots__ = ("f", "calls")
def __init__(self):
self.calls = 0
async def f(self):
self.calls += 1
return 42
C.f = async_cached_property(f, C.f)
a = C()
self.assertEqual(await a.f, 42)
self.assertEqual(a.calls, 1)
self.assertEqual(await a.f, 42)
self.assertEqual(a.calls, 1)
@async_test
async def test_cached_property_slot_raises(self):
class C:
__slots__ = ("f",)
async def f(self):
raise NoWayError()
C.f = async_cached_property(f, C.f)
with self.assertRaises(NoWayError):
await C().f
@async_test
async def test_cached_property_slot_wrong_type(self):
"""apply a cached property from one type to another"""
class C:
__slots__ = ("abc",)
class D:
pass
async def f(self):
return 42
D.abc = async_cached_property(f, C.abc)
a = D()
with self.assertRaises(TypeError):
x = await a.abc
@async_test
async def test_cached_property_slot_name(self):
class C:
__slots__ = ("f",)
async def f(self):
return 42
C.f = async_cached_property(f, C.f)
self.assertEqual(C.f.name, "f")
@async_test
async def test_cached_property_slot_property(self):
class C:
__slots__ = ("f",)
async def f(self):
return 42
prev_f = C.f
C.f = async_cached_property(f, C.f)
self.assertEqual(C.f.slot, prev_f)
@async_test
async def test_cached_property_no_slot_property(self):
class C:
@async_cached_property
async def f(self):
return 42
self.assertEqual(C.f.slot, None)
@async_test
async def test_cached_property_non_descriptor(self):
async def f(self):
return 42
with self.assertRaises(TypeError):
async_cached_property(f, 42)
@async_test
async def test_cached_property_incompatible_descriptor(self):
async def f(self):
return 42
with self.assertRaises(TypeError):
async_cached_property(f, GeneratorType.gi_frame)
@async_test
async def test_cached_property_readonly_descriptor(self):
async def f(self):
return 42
with self.assertRaises(TypeError):
async_cached_property(f, range.start)
@async_test
async def test_cached_class_prop(self):
class C:
@async_cached_classproperty
async def f(self):
return 42
self.assertEqual(await C.f, 42)
@async_test
async def test_cached_class_prop_called_once(self):
class C:
calls = 0
@async_cached_classproperty
async def f(cls):
cls.calls += 1
""" .. _filter1D:
Filter1D --- 1-dimensional spectral filtering.
----------------------------------------------
This module defines the 1D filter methods.
"""
import numpy as np
import math
from copy import deepcopy
from collections import OrderedDict
class Filter1D(object):
""" This class defines and runs 1D spectral filters. The currently available
filters are Gaussian, Hanning, Triangle, Welch, Boxcar, and Savitzky
Golay. The output spectrum will be of the same length as the input
    spectrum; however, some edge channels may be zeroed by some methods,
    depending on the input parameters.
Parameters
----------
spec : numpy array
1D numpy array of the input spectrum (just the amplitudes).
method : str
The smoothing filter to apply: boxcar, gaussian, welch, hanning,
triangle, or savgol.
No default. Minimum matching is enabled with a minimum of 3
characters, i.e. box = boxcar.
keyval : various
Any keyword value pairs for the specific method chosen, see the
notes for specific keywords.
Attributes
----------
spec : numpy array
The spectrum.
len : int
The length of the spectrum.
methods : list
A list of the available filters.
[method]_args : dict
A dictionary for each method giving its keywords and defaults
(e.g. boxcar_args).
method : str
The method being used.
Notes
-----
Details of the different filter keywords and defaults:
.. tabularcolumns:: |p{1.5cm}|p{2cm}|p{0.5cm}|p{8cm}|
+------------+---------------+------+----------------------------------------------+
| Filter | Keyword | Def. | Description |
+============+===============+======+==============================================+
| "boxcar" | "width" | 3 | Number of channels to average together |
+------------+---------------+------+----------------------------------------------+
| "gaussian" | "width" | 7 | Number of channels to span with the gaussian |
+------------+---------------+------+----------------------------------------------+
| "hanning" | "width" | 5 | Number of channels to include in the cos |
+------------+---------------+------+----------------------------------------------+
| "triangle" | "width" | 5 | Number of channels to span with the triangle |
+------------+---------------+------+----------------------------------------------+
| "welch" | "width" | 5 | Number of channels to use in the function |
+------------+---------------+------+----------------------------------------------+
| "savgol" | "window_size" | 7 | Number of channels to use in the calculation |
+------------+---------------+------+----------------------------------------------+
    |            | "order"       | 3    | Order of the polynomial fit (must be odd)    |
+------------+---------------+------+----------------------------------------------+
| | "deriv" | 0 | The number of the derivative to compute |
| | | | (0 = just smooth) |
+------------+---------------+------+----------------------------------------------+
"""
boxcar_args = OrderedDict([("width", 3)])
gaussian_args = OrderedDict([("width", 7)])
welch_args = OrderedDict([("width", 5)])
hanning_args = OrderedDict([("width", 5)])
triangle_args = OrderedDict([("width", 5)])
savgol_args = OrderedDict([("window_size", 7),
("order" , 3),
("deriv" , 0),
("rate" , 1)])
methods = ["boxcar",
"gaussian",
"welch",
"hanning",
"triangle",
"savgol"]
def __init__(self, spec, method, **keyval):
if len(spec.shape) > 1:
raise Exception("Spectrum is not 1D but you are trying to use a 1D filter.")
self.spec = spec
self.len = self.spec.shape[0]
# keywords for the different algorithms
self.method = self.checkmethod(method)
        for k, v in keyval.items():
try:
a = getattr(self, method + "_args")[k]
except:
raise Exception("Unknown input %s for smoothing." % (k))
if type(a) != type(v):
raise Exception("Cannot change the type of an attribute. %s must be a %s not a %s." % (k, type(a), type(v)))
getattr(self, method + "_args")[k] = v
def isodd(self, value):
""" Method to determine if a number is odd
Parameters
----------
value : int
The number to check
Returns
-------
bool, True if the number is odd, False if it is even
"""
return value%2 == 1
def checkmethod(self, method):
""" Method to interpret the input method and determine the full method
name
Parameters
----------
method : str
The method to use, minimal matching is possible, with a minimum
of 3 characters (e.g. "box" will be interpreted to be "boxcar")
Returns
-------
        str
            The full name of the matched filter method
"""
if len(method) < 3:
raise Exception("Minimum of 3 characters are needed for minimal matching of strings.")
for m in self.methods:
if m.startswith(method):
return m
raise Exception("Unknown method %s given for smoothing. Available methods are: %s" % (method, str(self.methods)))
def buffer(self, nchan):
""" Method to buffer/pad an array so that filters can work all the way
to the edge. Uses np.pad with mode='reflect'
Parameters
----------
nchan : int
The number of channels to add to each end of the array
Returns
-------
Numpy array containing the buffered input array
"""
return np.pad(self.spec, (nchan, ), mode='reflect')
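    # Illustration of the reflect padding used above (assumed example values,
    # not taken from the module):
    #   np.pad(np.array([1, 2, 3, 4]), (2,), mode='reflect')
    #   -> array([3, 2, 1, 2, 3, 4, 3, 2])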
def boxcar(self, width):
r""" Method to apply a boxcar filter to a spectrum. The filter for point
x[i] is defined as:
.. math::
x[i] = \frac{1}{N} \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}]
where N is the width of the filter.
Parameters
----------
width : int
The width of the box to use in channels, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be
zeroed
"""
if not self.isodd(width):
raise Exception("Boxcar width must be an odd number.")
        side = (width - 1) // 2
kernel = np.array([1.0] * width)
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
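    # Worked example (assumed input, for illustration): with width=3 the kernel
    # is [1/3, 1/3, 1/3], so boxcar(3) applied to [1, 2, 3, 4] uses the buffered
    # array [2, 1, 2, 3, 4, 3] and yields [5/3, 2, 3, 10/3] (about [1.67, 2, 3, 3.33]).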
def gaussian(self, width):
r""" Method to apply a Gaussian filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] e^{-\frac{1}{2}\left(\frac{n-(N-1)/2}{\sigma(N-1)/2}\right)^2}
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the gaussian for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Gaussian width must be an odd number.")
        side = (width - 1) // 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = math.exp(-0.5 * pow(((float(j) - ((float(width) - 1.0) /
2.0)) / (0.2 * (float(width) - 1.0) / 2.0)), 2))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
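    # Note on the kernel above: the code fixes the Gaussian sigma at 0.2 of the
    # half-width, e.g. for width=7 the denominator is 0.2 * (7 - 1) / 2 = 0.6
    # channels, and the kernel is normalised to sum to 1 before convolution.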
def welch(self, width):
r""" Method to apply a Welch filter to a spectrum. The filter for point x[i]
is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] \left(1 - \left(\frac{n - \frac{N-1}{2}}{\frac{N-1}{2}}\right)^2\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Welch width must be an odd number.")
width += 2 # must add 2 to get the proper width
        side = (width - 1) // 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = (1 - math.pow((j - (float(width - 1) / 2.0)) /
(float(width - 1) / 2.0), 2))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def hanning(self, width):
r""" Method to apply a Hanning filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] 0.5 \left(1 - \cos\left(\frac{2\pi n}{N-1}\right)\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
"""
if not self.isodd(width):
raise Exception("Hanning width must be an odd number.")
width += 2 # must add 2 to get the proper width
        side = (width - 1) // 2
kernel = np.zeros(width)
for j in range(width):
kernel[j] = 0.5 * (1.0 - math.cos((2.0 * math.pi * j) / float(width - 1)))
kernel /= kernel.sum()
return np.convolve(self.buffer(side), kernel, mode="valid")
def triangle(self, width):
r""" Method to apply a Triangular filter to a spectrum. The filter for
point x[i] is defined as:
.. math::
x[i] = \sum_{n=0}^{N} x[i + n - \frac{N - 1}{2}] \left(1 - \left|\frac{n-\frac{N-1}{2}}{\frac{N}{2}}\right|\right)
where N is the width of the filter.
Parameters
----------
width : int
The number of channels to span with the function for each
iteration, must be odd
Returns
-------
numpy array
            The smoothed spectrum, (width - 1)/2 edge channels will be zeroed
        """
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.retail.v2',
manifest={
'Audience',
'ColorInfo',
'CustomAttribute',
'FulfillmentInfo',
'Image',
'Interval',
'PriceInfo',
'Rating',
'UserInfo',
'Promotion',
},
)
class Audience(proto.Message):
r"""An intended audience of the
[Product][google.cloud.retail.v2.Product] for whom it's sold.
Attributes:
genders (Sequence[str]):
The genders of the audience. Strongly encouraged to use the
standard values: "male", "female", "unisex".
At most 5 values are allowed. Each value must be a UTF-8
encoded string with a length limit of 128 characters.
Otherwise, an INVALID_ARGUMENT error is returned.
Google Merchant Center property
`gender <https://support.google.com/merchants/answer/6324479>`__.
Schema.org property
`Product.audience.suggestedGender <https://schema.org/suggestedGender>`__.
age_groups (Sequence[str]):
The age groups of the audience. Strongly encouraged to use
the standard values: "newborn" (up to 3 months old),
"infant" (3–12 months old), "toddler" (1–5 years old),
"kids" (5–13 years old), "adult" (typically teens or older).
At most 5 values are allowed. Each value must be a UTF-8
encoded string with a length limit of 128 characters.
Otherwise, an INVALID_ARGUMENT error is returned.
Google Merchant Center property
`age_group <https://support.google.com/merchants/answer/6324463>`__.
Schema.org property
`Product.audience.suggestedMinAge <https://schema.org/suggestedMinAge>`__
and
`Product.audience.suggestedMaxAge <https://schema.org/suggestedMaxAge>`__.
"""
genders = proto.RepeatedField(
proto.STRING,
number=1,
)
age_groups = proto.RepeatedField(
proto.STRING,
number=2,
)
class ColorInfo(proto.Message):
r"""The color information of a
[Product][google.cloud.retail.v2.Product].
Attributes:
color_families (Sequence[str]):
The standard color families. Strongly recommended to use the
following standard color groups: "Red", "Pink", "Orange",
"Yellow", "Purple", "Green", "Cyan", "Blue", "Brown",
"White", "Gray", "Black" and "Mixed". Normally it is
expected to have only 1 color family. May consider using
single "Mixed" instead of multiple values.
A maximum of 5 values are allowed. Each value must be a
UTF-8 encoded string with a length limit of 128 characters.
Otherwise, an INVALID_ARGUMENT error is returned.
Google Merchant Center property
`color <https://support.google.com/merchants/answer/6324487>`__.
Schema.org property
`Product.color <https://schema.org/color>`__.
colors (Sequence[str]):
The color display names, which may be different from
standard color family names, such as the color aliases used
in the website frontend. Normally it is expected to have
only 1 color. May consider using single "Mixed" instead of
multiple values.
A maximum of 25 colors are allowed. Each value must be a
UTF-8 encoded string with a length limit of 128 characters.
Otherwise, an INVALID_ARGUMENT error is returned.
Google Merchant Center property
`color <https://support.google.com/merchants/answer/6324487>`__.
Schema.org property
`Product.color <https://schema.org/color>`__.
"""
color_families = proto.RepeatedField(
proto.STRING,
number=1,
)
colors = proto.RepeatedField(
proto.STRING,
number=2,
)
class CustomAttribute(proto.Message):
r"""A custom attribute that is not explicitly modeled in
[Product][google.cloud.retail.v2.Product].
Attributes:
text (Sequence[str]):
The textual values of this custom attribute. For example,
``["yellow", "green"]`` when the key is "color".
At most 400 values are allowed. Empty values are not
allowed. Each value must be a UTF-8 encoded string with a
length limit of 256 characters. Otherwise, an
INVALID_ARGUMENT error is returned.
Exactly one of
[text][google.cloud.retail.v2.CustomAttribute.text] or
[numbers][google.cloud.retail.v2.CustomAttribute.numbers]
should be set. Otherwise, an INVALID_ARGUMENT error is
returned.
numbers (Sequence[float]):
The numerical values of this custom attribute. For example,
``[2.3, 15.4]`` when the key is "lengths_cm".
            At most 400 values are allowed. Otherwise, an
INVALID_ARGUMENT error is returned.
Exactly one of
[text][google.cloud.retail.v2.CustomAttribute.text] or
[numbers][google.cloud.retail.v2.CustomAttribute.numbers]
should be set. Otherwise, an INVALID_ARGUMENT error is
returned.
searchable (bool):
If true, custom attribute values are searchable by text
queries in
[SearchService.Search][google.cloud.retail.v2.SearchService.Search].
This field is ignored in a
[UserEvent][google.cloud.retail.v2.UserEvent].
Only set if type
[text][google.cloud.retail.v2.CustomAttribute.text] is set.
            Otherwise, an INVALID_ARGUMENT error is returned.
indexable (bool):
            If true, custom attribute values are indexed so that they can
            be filtered, faceted, or boosted in
[SearchService.Search][google.cloud.retail.v2.SearchService.Search].
This field is ignored in a
[UserEvent][google.cloud.retail.v2.UserEvent].
See
[SearchRequest.filter][google.cloud.retail.v2.SearchRequest.filter],
[SearchRequest.facet_specs][google.cloud.retail.v2.SearchRequest.facet_specs]
and
[SearchRequest.boost_spec][google.cloud.retail.v2.SearchRequest.boost_spec]
for more details.
"""
text = proto.RepeatedField(
proto.STRING,
number=1,
)
numbers = proto.RepeatedField(
proto.DOUBLE,
number=2,
)
searchable = proto.Field(
proto.BOOL,
number=3,
optional=True,
)
indexable = proto.Field(
proto.BOOL,
number=4,
optional=True,
)
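# Example construction (a sketch; proto-plus message classes accept their field
# names as keyword arguments, and this usage is an illustration rather than part
# of the generated file):
#   attr = CustomAttribute(text=["yellow", "green"], searchable=True)
#   product_attributes = {"color": attr}   # "color" is a hypothetical attribute key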
class FulfillmentInfo(proto.Message):
r"""Fulfillment information, such as the store IDs for in-store
pickup or region IDs for different shipping methods.
Attributes:
type_ (str):
The fulfillment type, including commonly used types (such as
pickup in store and same day delivery), and custom types.
Customers have to map custom types to their display names
before rendering UI.
Supported values:
- "pickup-in-store"
- "ship-to-store"
- "same-day-delivery"
- "next-day-delivery"
- "custom-type-1"
- "custom-type-2"
- "custom-type-3"
- "custom-type-4"
- "custom-type-5"
If this field is set to an invalid value other than these,
an INVALID_ARGUMENT error is returned.
place_ids (Sequence[str]):
The IDs for this
[type][google.cloud.retail.v2.FulfillmentInfo.type], such as
the store IDs for
[FulfillmentInfo.type.pickup-in-store][google.cloud.retail.v2.FulfillmentInfo.type]
or the region IDs for
[FulfillmentInfo.type.same-day-delivery][google.cloud.retail.v2.FulfillmentInfo.type].
A maximum of 3000 values are allowed. Each value must be a
string with a length limit of 30 characters, matching the
pattern ``[a-zA-Z0-9_-]+``, such as "store1" or "REGION-2".
Otherwise, an INVALID_ARGUMENT error is returned.
"""
type_ = proto.Field(
proto.STRING,
number=1,
)
place_ids = proto.RepeatedField(
proto.STRING,
number=2,
)
class Image(proto.Message):
r"""[Product][google.cloud.retail.v2.Product] thumbnail/detail image.
Attributes:
uri (str):
Required. URI of the image.
This field must be a valid UTF-8 encoded URI with a length
limit of 5,000 characters. Otherwise, an INVALID_ARGUMENT
error is returned.
Google Merchant Center property
`image_link <https://support.google.com/merchants/answer/6324350>`__.
Schema.org property
`Product.image <https://schema.org/image>`__.
height (int):
Height of the image in number of pixels.
This field must be nonnegative. Otherwise, an
INVALID_ARGUMENT error is returned.
width (int):
Width of the image in number of pixels.
This field must be nonnegative. Otherwise, an
INVALID_ARGUMENT error is returned.
"""
uri = proto.Field(
proto.STRING,
number=1,
)
height = proto.Field(
proto.INT32,
number=2,
)
width = proto.Field(
proto.INT32,
number=3,
)
class Interval(proto.Message):
r"""A floating point interval.
Attributes:
minimum (float):
Inclusive lower bound.
exclusive_minimum (float):
Exclusive lower bound.
maximum (float):
Inclusive upper bound.
exclusive_maximum (float):
Exclusive upper bound.
"""
minimum = proto.Field(
proto.DOUBLE,
number=1,
oneof='min',
)
exclusive_minimum = proto.Field(
proto.DOUBLE,
number=2,
oneof='min',
)
maximum = proto.Field(
proto.DOUBLE,
number=3,
oneof='max',
)
exclusive_maximum = proto.Field(
proto.DOUBLE,
number=4,
oneof='max',
)
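# Example (illustrative only): because the lower and upper bounds are oneof
# groups, set either the inclusive or the exclusive variant of each bound.
#   price_interval = Interval(minimum=10.0, exclusive_maximum=20.0)   # [10.0, 20.0)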
class PriceInfo(proto.Message):
r"""The price information of a
[Product][google.cloud.retail.v2.Product].
Attributes:
currency_code (str):
The 3-letter currency code defined in `ISO
4217 <https://www.iso.org/iso-4217-currency-codes.html>`__.
If this field is an unrecognizable currency code, an
INVALID_ARGUMENT error is returned.
The
[Product.Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product]s with the same
[Product.primary_product_id][google.cloud.retail.v2.Product.primary_product_id]
must share the same
[currency_code][google.cloud.retail.v2.PriceInfo.currency_code].
Otherwise, a FAILED_PRECONDITION error is returned.
price (float):
Price of the product.
Google Merchant Center property
`price <https://support.google.com/merchants/answer/6324371>`__.
Schema.org property
`Offer.priceSpecification <https://schema.org/priceSpecification>`__.
original_price (float):
Price of the product without any discount. If zero, by
default set to be the
[price][google.cloud.retail.v2.PriceInfo.price].
cost (float):
The costs associated with the sale of a particular product.
Used for gross profit reporting.
- Profit = [price][google.cloud.retail.v2.PriceInfo.price]
- [cost][google.cloud.retail.v2.PriceInfo.cost]
Google Merchant Center property
`cost_of_goods_sold <https://support.google.com/merchants/answer/9017895>`__.
price_effective_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when the
[price][google.cloud.retail.v2.PriceInfo.price] starts to be
effective. This can be set as a future timestamp, and the
[price][google.cloud.retail.v2.PriceInfo.price] is only used
for search after
[price_effective_time][google.cloud.retail.v2.PriceInfo.price_effective_time].
If so, the
[original_price][google.cloud.retail.v2.PriceInfo.original_price]
must be set and
[original_price][google.cloud.retail.v2.PriceInfo.original_price]
is used before
[price_effective_time][google.cloud.retail.v2.PriceInfo.price_effective_time].
Do not set if
[price][google.cloud.retail.v2.PriceInfo.price] is always
effective because it will cause additional latency during
search.
price_expire_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp when the
            [price][google.cloud.retail.v2.PriceInfo.price] stops being
effective. The
[price][google.cloud.retail.v2.PriceInfo.price] is used for
search before
[price_expire_time][google.cloud.retail.v2.PriceInfo.price_expire_time].
If this field is set, the
[original_price][google.cloud.retail.v2.PriceInfo.original_price]
must be set and
[original_price][google.cloud.retail.v2.PriceInfo.original_price]
is used after
[price_expire_time][google.cloud.retail.v2.PriceInfo.price_expire_time].
Do not set if
[price][google.cloud.retail.v2.PriceInfo.price] is always
effective because it will cause additional latency during
search.
price_range (google.cloud.retail_v2.types.PriceInfo.PriceRange):
Output only. The price range of all the child
[Product.Type.VARIANT][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product]s grouped together
on the
[Product.Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]. Only populated
for
[Product.Type.PRIMARY][google.cloud.retail.v2.Product.Type.PRIMARY]
[Product][google.cloud.retail.v2.Product]s.
Note: This field is OUTPUT_ONLY for
[ProductService.GetProduct][google.cloud.retail.v2.ProductService.GetProduct].
Do not set this field in API requests.
"""
class PriceRange(proto.Message):
r"""The price range of all
[variant][google.cloud.retail.v2.Product.Type.VARIANT]
[Product][google.cloud.retail.v2.Product] having the same
[Product.primary_product_id][google.cloud.retail.v2.Product.primary_product_id].
Attributes:
price (google.cloud.retail_v2.types.Interval):
The inclusive
[Product.pricing_info.price][google.cloud.retail.v2.PriceInfo.price]
interval of all
[variant][google.cloud.retail.v2.Product.Type.VARIANT]
                [Product][google.cloud.retail.v2.Product] having the same
                [Product.primary_product_id][google.cloud.retail.v2.Product.primary_product_id].
# python/foglamp/plugins/south/wind_turbine/wind_turbine.py
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
""" Module for Phidget poll mode plugin
The following is intended as the south service plugin for the Dianomic wind turbine demo of FogLAMP.
Phidget based sensors:
- Temperature & Humidity: HUM1000_0 (https://www.phidgets.com/?tier=3&catid=14&pcid=12&prodid=644)
- Spatial: MOT1101_0 (https://www.phidgets.com/?tier=3&catid=10&pcid=8&prodid=975)
- Rotary: 3531_0 (https://www.phidgets.com/?tier=3&catid=103&pcid=83&prodid=404)
- Current: VCP1100_0 (https://www.phidgets.com/?tier=3&catid=16&pcid=14&prodid=983)
"""
import copy
import datetime
import logging
import math
import time
from foglamp.common import logger
from foglamp.plugins.common import utils
from foglamp.services.south import exceptions
from Phidget22.Devices.Accelerometer import *
from Phidget22.Devices.CurrentInput import *
from Phidget22.Devices.Encoder import *
from Phidget22.Devices.Gyroscope import *
from Phidget22.Devices.HumiditySensor import *
from Phidget22.Devices.Magnetometer import *
from Phidget22.Devices.TemperatureSensor import *
from Phidget22.PhidgetException import *
from Phidget22.Phidget import *
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2019 Dianomic Systems Inc."
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_DEFAULT_CONFIG = {
'plugin': {
'description': 'Wind Turbine Poll Plugin',
'type': 'string',
'default': 'wind_turbine',
'readonly': 'true'
},
'hubSN': {
'description': 'VINT Hub Serial Number',
'type': 'string',
'default': '538854',
'order': '1',
'displayName': 'VINT Hub SN'
},
'assetPrefix': {
'description': 'Prefix of asset name',
'type': 'string',
'default': 'wind_turbine/',
'order': '2',
'displayName': 'Asset Name Prefix'
},
'tempHumAssetName': {
'description': 'Humidity/Temperature sensor asset name',
'type': 'string',
'default': 'weather',
'order': '3',
'displayName': 'Humidity/Temperature Asset Name'
},
'tempHumPort': {
'description': 'VINT Hub port of temperature/humidity sensor',
'type': 'string',
'default': '0',
'order': '4',
'displayName': 'Humidity/Temperature Port'
},
'tempHumPoll': {
        'description': 'Obtain Humidity/Temperature every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '5',
'displayName': 'Humidity/Temperature Poll',
},
'tempHumEnable': {
'description': 'Enable Humidity/Temperature',
'type': 'boolean',
'default': 'true',
'order': '6',
'displayName': 'Enable Humidity/Temperature'
},
'currentAssetName': {
'description': 'Current sensor asset name',
'type': 'string',
'default': 'current',
'order': '7',
'displayName': 'Current Asset Name'
},
'currentPort': {
'description': 'VINT Hub port of current sensor',
'type': 'string',
'default': '3',
'order': '8',
'displayName': 'Current Port'
},
'currentPoll': {
        'description': 'Obtain current every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '9',
'displayName': 'Current Poll'
},
'currentEnable': {
'description': 'Enable/Disable Current sensor',
'type': 'boolean',
'default': 'true',
'order': '10',
'displayName': 'Enable Current'
},
'encoderAssetName': {
'description': 'Encoder sensor asset name',
'type': 'string',
'default': 'encoder',
'order': '11',
'displayName': 'Encoder Asset Name'
},
'encoderPort': {
'description': 'VINT Hub port of encoder sensor',
'type': 'string',
'default': '1',
'order': '12',
'displayName': 'Encoder Port'
},
'encoderPoll': {
        'description': 'Obtain encoder every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '13',
'displayName': 'Encoder Poll'
},
'encoderEnable': {
'description': 'Enable Encoder Sensor',
'type': 'boolean',
'default': 'true',
'order': '14',
'displayName': 'Enable Encoder'
},
'spatialPort': {
'description': 'VINT Hub port of spatial sensors',
'type': 'string',
'default': '2',
'order': '15',
'displayName': 'Spatial Port'
},
'accelerometerAssetName': {
'description': 'accelerometer sensor asset name',
'type': 'string',
'default': 'accelerometer',
'order': '16',
'displayName': 'accelerometer Asset Name'
},
'accelerometerPoll': {
        'description': 'Obtain accelerometer every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '17',
'displayName': 'Acceleration Poll'
},
'accelerometerEnable': {
'description': 'Enable Acceleration Sensor',
'type': 'boolean',
'default': 'true',
'order': '18',
        'displayName': 'Enable Acceleration'
},
'gyroscopeAssetName': {
'description': 'gyroscope sensor asset name',
'type': 'string',
'default': 'gyroscope',
'order': '19',
'displayName': 'gyroscope Asset Name'
},
'gyroscopePoll': {
        'description': 'Obtain gyroscope every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '20',
'displayName': 'Gyroscope Poll'
},
'gyroscopeEnable': {
'description': 'Enable Gyroscope Sensor',
'type': 'boolean',
'default': 'true',
'order': '21',
'displayName': 'Enable Gyroscope'
},
'magnetometerAssetName': {
'description': 'magnetometer sensor asset name',
'type': 'string',
'default': 'magnetometer',
'order': '22',
'displayName': 'magnetometer Asset Name'
},
'magnetometerPoll': {
        'description': 'Obtain magnetometer every nth time the plugin is polled',
'type': 'integer',
'default': '1',
'order': '23',
'displayName': 'Magnetometer Poll'
},
'magnetometerEnable': {
'description': 'Enable Magnetometer Sensor',
'type': 'boolean',
'default': 'true',
'order': '24',
'displayName': 'Enable Magnetometer'
}
}
_LOGGER = logger.setup(__name__, level=logging.INFO)
def plugin_info():
""" Returns information about the plugin.
Args:
Returns:
dict: plugin information
Raises:
"""
return {
'name': 'wind_turbine Poll Plugin',
'version': '1.7.0',
'mode': 'poll',
'type': 'south',
'interface': '1.0',
'config': _DEFAULT_CONFIG
}
def plugin_init(config):
""" Initialise the plugin.
Args:
config: JSON configuration document for the South plugin configuration category
Returns:
data: JSON object to be used in future calls to the plugin
Raises:
"""
try:
data = copy.deepcopy(config)
if data['tempHumEnable']['value'] == 'true':
data['humidity'] = HumiditySensor()
data['humidity'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['humidity'].setHubPort(int(data['tempHumPort']['value']))
data['humidity'].setIsHubPortDevice(False)
data['humidity'].setChannel(0)
data['humidity'].openWaitForAttachment(5000)
try:
data['humidity'].getHumidity()
except Exception:
pass
data['temperature'] = TemperatureSensor()
data['temperature'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['temperature'].setHubPort(int(data['tempHumPort']['value']))
data['temperature'].setIsHubPortDevice(False)
data['temperature'].setChannel(0)
data['temperature'].openWaitForAttachment(5000)
try:
data['temperature'].getTemperature()
except Exception:
pass
if data['currentEnable']['value'] == 'true':
data['current'] = CurrentInput()
data['current'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['current'].setHubPort(int(data['currentPort']['value']))
data['current'].setIsHubPortDevice(False)
data['current'].setChannel(0)
data['current'].openWaitForAttachment(5000)
try:
data['current'].getCurrent()
except Exception:
pass
if data['encoderEnable']['value'] == 'true':
data['encoder'] = Encoder()
data['encoder'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['encoder'].setHubPort(int(data['encoderPort']['value']))
data['encoder'].setIsHubPortDevice(False)
data['encoder'].setChannel(0)
data['encoder'].openWaitForAttachment(5000)
data['encoder'].setDataInterval(20)
i = 0
while i < 120:
try:
data['encoder'].getPosition()
except Exception:
time.sleep(1)
i += 1
else:
break
if data['accelerometerEnable']['value'] == 'true':
data['accelerometer'] = Accelerometer()
data['accelerometer'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['accelerometer'].setHubPort(int(data['spatialPort']['value']))
data['accelerometer'].setIsHubPortDevice(False)
data['accelerometer'].setChannel(0)
data['accelerometer'].openWaitForAttachment(5000)
data['accelerometer'].setDataInterval(20)
i = 0
while i < 120:
try:
data['accelerometer'].getAcceleration()
except Exception:
time.sleep(1)
i += 1
else:
break
if data['gyroscopeEnable']['value'] == 'true':
data['gyroscope'] = Gyroscope()
data['gyroscope'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['gyroscope'].setHubPort(int(data['spatialPort']['value']))
data['gyroscope'].setIsHubPortDevice(False)
data['gyroscope'].setChannel(0)
data['gyroscope'].openWaitForAttachment(5000)
data['gyroscope'].setDataInterval(20)
i = 0
while i < 120:
try:
data['gyroscope'].getAngularRate()
except Exception:
time.sleep(1)
i += 1
else:
break
if data['magnetometerEnable']['value'] == 'true':
data['magnetometer'] = Magnetometer()
data['magnetometer'].setDeviceSerialNumber(int(data['hubSN']['value']))
data['magnetometer'].setHubPort(int(data['spatialPort']['value']))
data['magnetometer'].setIsHubPortDevice(False)
data['magnetometer'].setChannel(0)
data['magnetometer'].openWaitForAttachment(5000)
data['magnetometer'].setDataInterval(20)
i = 0
while i < 120:
try:
data['magnetometer'].getMagneticField()
except Exception:
time.sleep(1)
i += 1
else:
break
except Exception as ex:
_LOGGER.exception("wind_turbine exception: {}".format(str(ex)))
raise ex
# counter to know when to run process
data['tempHumCount'] = 0
data['currentCount'] = 0
data['encoderCount'] = 0
data['accelerometerCount'] = 0
data['gyroscopeCount'] = 0
data['magnetometerCount'] = 0
# counter of last encoder value
data['encoderPreviousValue'] = 0
data['encoderPreviousTime'] = 0
return data
def plugin_poll(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for poll mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
TimeoutError
"""
try:
time_stamp = utils.local_timestamp()
data = list()
if handle['tempHumEnable']['value'] == 'true' and handle['tempHumCount'] == 0:
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['tempHumAssetName']['value']),
'timestamp': time_stamp,
'readings': {
"temperature": handle['temperature'].getTemperature(),
"humidity": handle['humidity'].getHumidity()
}
})
if handle['currentEnable']['value'] == 'true' and handle['currentCount'] == 0:
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['currentAssetName']['value']),
'timestamp': time_stamp,
'readings': {
"current": handle['current'].getCurrent()
}
})
if handle['encoderEnable']['value'] == 'true' and handle['encoderCount'] == 0:
value = handle['encoder'].getPosition()
# convert time_stamp to be usable
if ":" == time_stamp[-3:-2]:
timestamp_new = datetime.datetime.strptime(time_stamp[:-3]+time_stamp[-2:], '%Y-%m-%d %H:%M:%S.%f%z')
else:
timestamp_new = datetime.datetime.strptime(time_stamp, '%Y-%m-%d %H:%M:%S.%f%z')
if handle['encoderPreviousValue'] > 0: # omit first one
# calculate elapse time in milliseconds
elapse_time = timestamp_new - handle['encoderPreviousTime']
elapse_time = elapse_time.total_seconds()
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['encoderAssetName']['value']),
'timestamp': time_stamp,
'readings': {
# (current_total_iterations - previous_total_iterations) / (elapsed time in seconds)
"rotation-per-second": ((value - handle['encoderPreviousValue'])/1200)/elapse_time
}
})
# update old values
handle['encoderPreviousValue'] = value
handle['encoderPreviousTime'] = timestamp_new
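        # Worked example (assumed numbers): the code treats 1200 encoder counts as
        # one rotation, so if the position advanced by 2400 counts over 2.0 s the
        # reported reading is (2400 / 1200) / 2.0 = 1.0 rotation-per-second.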
if handle['accelerometerEnable']['value'] == 'true' and handle['accelerometerCount'] == 0:
x, y, z = handle['accelerometer'].getAcceleration()
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['accelerometerAssetName']['value']),
'timestamp': time_stamp,
'readings': {
"accelerometer-x": x,
"accelerometer-y": y,
"accelerometer-z": z
}
})
if handle['gyroscopeEnable']['value'] == 'true' and handle['gyroscopeCount'] == 0:
x, y, z = handle['gyroscope'].getAngularRate()
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['gyroscopeAssetName']['value']),
'timestamp': time_stamp,
'readings': {
"gyroscope-x": x,
"gyroscope-y": y,
"gyroscope-z": z
}
})
if handle['magnetometerEnable']['value'] == 'true' and handle['magnetometerCount'] == 0:
x, y, z = handle['magnetometer'].getMagneticField()
data.append({
'asset': '{}{}'.format(handle['assetPrefix']['value'], handle['magnetometerAssetName']['value']),
'timestamp': time_stamp,
'readings': {
"magnetometer-x": x,
"magnetometer-y": y,
"magnetometer-z": z
}
})
handle['tempHumCount'] = (handle['tempHumCount'] + 1) % int(handle['tempHumPoll']['value'])
handle['currentCount'] = (handle['currentCount'] + 1) % int(handle['currentPoll']['value'])
handle['encoderCount'] = (handle['encoderCount'] + 1) % int(handle['encoderPoll']['value'])
handle['accelerometerCount'] = (handle['accelerometerCount'] + 1) % int(handle['accelerometerPoll']['value'])
handle['gyroscopeCount'] = (handle['gyroscopeCount'] + 1) % int(handle['gyroscopePoll']['value'])
handle['magnetometerCount'] = (handle['magnetometerCount'] + 1) % int(handle['magnetometerPoll']['value'])
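        # Cadence sketch: with e.g. 'tempHumPoll' set to "3", the counter cycles
        # 0, 1, 2, 0, ... and temperature/humidity readings are appended only on
        # the polls where the counter is 0, i.e. every third call to plugin_poll.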
except (Exception, RuntimeError) as ex:
_LOGGER.exception("wind_turbine exception: {}".format(str(ex)))
raise exceptions.DataRetrievalError(ex)
else:
return data
def plugin_reconfigure(handle, new_config):
""" Reconfigures the plugin
Args:
handle: handle returned | |
def test_code_info_object(self):
self.assertRaises(TypeError, dis.code_info, object())
def test_pretty_flags_no_flags(self):
self.assertEqual(dis.pretty_flags(0), '0x0')
# Fodder for instruction introspection tests
# Editing any of these may require recalculating the expected output
def outer(a=1, b=2):
def f(c=3, d=4):
def inner(e=5, f=6):
print(a, b, c, d, e, f)
print(a, b, c, d)
return inner
print(a, b, '', 1, [], {}, "Hello world!")
return f
def jumpy():
# This won't actually run (but that's OK, we only disassemble it)
for i in range(10):
print(i)
if i < 4:
continue
if i > 6:
break
else:
print("I can haz else clause?")
while i:
print(i)
i -= 1
if i > 6:
continue
if i < 4:
break
else:
print("Who let lolcatz into this test suite?")
try:
1 / 0
except ZeroDivisionError:
print("Here we go, here we go, here we go...")
else:
with i as dodgy:
print("Never reach this")
finally:
print("OK, now we're done")
# End fodder for opinfo generation tests
expected_outer_line = 1
_line_offset = outer.__code__.co_firstlineno - 1
code_object_f = outer.__code__.co_consts[4]
expected_f_line = code_object_f.co_firstlineno - _line_offset
code_object_inner = code_object_f.co_consts[4]
expected_inner_line = code_object_inner.co_firstlineno - _line_offset
expected_jumpy_line = 1
# The following lines are useful to regenerate the expected results after
# either the fodder is modified or the bytecode generation changes
# After regeneration, update the references to code_object_f and
# code_object_inner before rerunning the tests
#_instructions = dis.get_instructions(outer, first_line=expected_outer_line)
#print('expected_opinfo_outer = [\n ',
#',\n '.join(map(str, _instructions)), ',\n]', sep='')
#_instructions = dis.get_instructions(outer(), first_line=expected_f_line)
#print('expected_opinfo_f = [\n ',
#',\n '.join(map(str, _instructions)), ',\n]', sep='')
#_instructions = dis.get_instructions(outer()(), first_line=expected_inner_line)
#print('expected_opinfo_inner = [\n ',
#',\n '.join(map(str, _instructions)), ',\n]', sep='')
#_instructions = dis.get_instructions(jumpy, first_line=expected_jumpy_line)
#print('expected_opinfo_jumpy = [\n ',
#',\n '.join(map(str, _instructions)), ',\n]', sep='')
Instruction = dis.Instruction
expected_opinfo_outer = [
Instruction(opname='FUNC_HEADER', opcode=6, imm=[14], argval=14, argrepr='14', offset=0, starts_line=1, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[2], argval=3, argrepr='3', offset=2, starts_line=2, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[3], argval=3, argrepr='.t0', offset=4, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[3], argval=4, argrepr='4', offset=6, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[4], argval=4, argrepr='.t1', offset=8, starts_line=None, is_jump_target=False),
Instruction(opname='MAKE_FUNCTION', opcode=106, imm=[4], argval=code_object_f, argrepr=repr(code_object_f), offset=10, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_FAST', opcode=2, imm=[4], argval=4, argrepr='.t1', offset=12, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_FAST', opcode=2, imm=[3], argval=3, argrepr='.t0', offset=14, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[2], argval=2, argrepr='f', offset=16, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[5, 254], argval='print', argrepr="'print'; 254", offset=18, starts_line=7, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[6], argval=6, argrepr='.t3', offset=21, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[0], argval=0, argrepr='a', offset=23, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[7], argval=7, argrepr='.t4', offset=25, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[1], argval=1, argrepr='b', offset=27, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[8], argval=8, argrepr='.t5', offset=29, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[6], argval='', argrepr="''", offset=31, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[9], argval=9, argrepr='.t6', offset=33, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[7], argval=1, argrepr='1', offset=35, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[10], argval=10, argrepr='.t7', offset=37, starts_line=None, is_jump_target=False),
Instruction(opname='BUILD_LIST', opcode=93, imm=[11, 0], argval=(11, 0), argrepr='.t8; 0', offset=39, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[11], argval=11, argrepr='.t8', offset=42, starts_line=None, is_jump_target=False),
Instruction(opname='BUILD_MAP', opcode=95, imm=[0], argval=0, argrepr='0', offset=44, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[12], argval=12, argrepr='.t9', offset=46, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[8], argval='Hello world!', argrepr="'Hello world!'", offset=48, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[13], argval=13, argrepr='.t10', offset=50, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[7, 7], argval=(7, 7), argrepr='.t4 to .t11', offset=52, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=56, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_FAST', opcode=50, imm=[2], argval=2, argrepr='f', offset=57, starts_line=8, is_jump_target=False),
Instruction(opname='RETURN_VALUE', opcode=75, imm=[], argval=None, argrepr='', offset=59, starts_line=None, is_jump_target=False),
]
expected_opinfo_f = [
Instruction(opname='FUNC_HEADER', opcode=6, imm=[13], argval=13, argrepr='13', offset=0, starts_line=2, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[2], argval=5, argrepr='5', offset=2, starts_line=3, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[5], argval=5, argrepr='.t0', offset=4, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[3], argval=6, argrepr='6', offset=6, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[6], argval=6, argrepr='.t1', offset=8, starts_line=None, is_jump_target=False),
Instruction(opname='MAKE_FUNCTION', opcode=106, imm=[4], argval=code_object_inner, argrepr=repr(code_object_inner), offset=10, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_FAST', opcode=2, imm=[6], argval=6, argrepr='.t1', offset=12, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_FAST', opcode=2, imm=[5], argval=5, argrepr='.t0', offset=14, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[2], argval=2, argrepr='inner', offset=16, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[5, 254], argval='print', argrepr="'print'; 254", offset=18, starts_line=5, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[8], argval=8, argrepr='.t3', offset=21, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[3], argval=3, argrepr='a', offset=23, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[9], argval=9, argrepr='.t4', offset=25, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[4], argval=4, argrepr='b', offset=27, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[10], argval=10, argrepr='.t5', offset=29, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[0], argval=0, argrepr='c', offset=31, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[11], argval=11, argrepr='.t6', offset=33, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[1], argval=1, argrepr='d', offset=35, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[12], argval=12, argrepr='.t7', offset=37, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[9, 4], argval=(9, 4), argrepr='.t4 to .t8', offset=39, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=43, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_FAST', opcode=50, imm=[2], argval=2, argrepr='inner', offset=44, starts_line=6, is_jump_target=False),
Instruction(opname='RETURN_VALUE', opcode=75, imm=[], argval=None, argrepr='', offset=46, starts_line=None, is_jump_target=False),
]
expected_opinfo_inner = [
Instruction(opname='FUNC_HEADER', opcode=6, imm=[16], argval=16, argrepr='16', offset=0, starts_line=3, is_jump_target=False),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[2, 254], argval='print', argrepr="'print'; 254", offset=2, starts_line=4, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[9], argval=9, argrepr='.t3', offset=5, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[2], argval=2, argrepr='a', offset=7, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[10], argval=10, argrepr='.t4', offset=9, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[3], argval=3, argrepr='b', offset=11, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[11], argval=11, argrepr='.t5', offset=13, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[4], argval=4, argrepr='c', offset=15, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[12], argval=12, argrepr='.t6', offset=17, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_DEREF', opcode=56, imm=[5], argval=5, argrepr='d', offset=19, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[13], argval=13, argrepr='.t7', offset=21, starts_line=None, is_jump_target=False),
Instruction(opname='COPY', opcode=4, imm=[14, 0], argval=(14, 0), argrepr='.t8 <- e', offset=23, starts_line=None, is_jump_target=False),
Instruction(opname='COPY', opcode=4, imm=[15, 1], argval=(15, 1), argrepr='.t9 <- f', offset=26, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[10, 6], argval=(10, 6), argrepr='.t4 to .t10', offset=29, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=33, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[0], argval=None, argrepr='None', offset=34, starts_line=None, is_jump_target=False),
Instruction(opname='RETURN_VALUE', opcode=75, imm=[], argval=None, argrepr='', offset=36, starts_line=None, is_jump_target=False),
]
expected_opinfo_jumpy = [
Instruction(opname='FUNC_HEADER', opcode=6, imm=[9], argval=9, argrepr='9', offset=0, starts_line=1, is_jump_target=False),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[2, 254], argval='range', argrepr="'range'; 254", offset=2, starts_line=3, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[5], argval=5, argrepr='.t3', offset=5, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[3], argval=10, argrepr='10', offset=7, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[6], argval=6, argrepr='.t4', offset=9, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[6, 1], argval=(6, 1), argrepr='.t4 to .t5', offset=11, starts_line=None, is_jump_target=False),
Instruction(opname='GET_ITER', opcode=85, imm=[2], argval=2, argrepr='.t0', offset=15, starts_line=None, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[42], argval=59, argrepr='to 59', offset=17, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[0], argval=0, argrepr='i', offset=20, starts_line=None, is_jump_target=True),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[4, 252], argval='print', argrepr="'print'; 252", offset=22, starts_line=4, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[6], argval=6, argrepr='.t4', offset=25, starts_line=None, is_jump_target=False),
Instruction(opname='COPY', opcode=4, imm=[7, 0], argval=(7, 0), argrepr='.t5 <- i', offset=27, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[7, 1], argval=(7, 1), argrepr='.t5 to .t6', offset=30, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=34, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[5], argval=4, argrepr='4', offset=35, starts_line=5, is_jump_target=False),
Instruction(opname='COMPARE_OP', opcode=36, imm=[0, 0], argval=(0, 0), argrepr='0; i', offset=37, starts_line=None, is_jump_target=False),
Instruction(opname='POP_JUMP_IF_FALSE', opcode=83, imm=[6], argval=46, argrepr='to 46', offset=40, starts_line=None, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[16], argval=59, argrepr='to 59', offset=43, starts_line=6, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[6], argval=6, argrepr='6', offset=46, starts_line=7, is_jump_target=True),
Instruction(opname='COMPARE_OP', opcode=36, imm=[4, 0], argval=(4, 0), argrepr='4; i', offset=48, starts_line=None, is_jump_target=False),
Instruction(opname='POP_JUMP_IF_FALSE', opcode=83, imm=[8], argval=59, argrepr='to 59', offset=51, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_FAST', opcode=2, imm=[2], argval=2, argrepr='.t0', offset=54, starts_line=8, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[21], argval=77, argrepr='to 77', offset=56, starts_line=None, is_jump_target=False),
Instruction(opname='FOR_ITER', opcode=87, imm=[2, -39], argval=(2, 20), argrepr='.t0; to 20', offset=59, starts_line=3, is_jump_target=True),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[4, 252], argval='print', argrepr="'print'; 252", offset=63, starts_line=10, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[5], argval=5, argrepr='.t3', offset=66, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[7], argval='I can haz else clause?', argrepr="'I can haz else clause?'", offset=68, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[6], argval=6, argrepr='.t4', offset=70, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[6, 1], argval=(6, 1), argrepr='.t4 to .t5', offset=72, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=76, starts_line=None, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[44], argval=121, argrepr='to 121', offset=77, starts_line=11, is_jump_target=True),
Instruction(opname='LOAD_GLOBAL', opcode=54, imm=[4, 252], argval='print', argrepr="'print'; 252", offset=80, starts_line=12, is_jump_target=True),
Instruction(opname='STORE_FAST', opcode=58, imm=[5], argval=5, argrepr='.t3', offset=83, starts_line=None, is_jump_target=False),
Instruction(opname='COPY', opcode=4, imm=[6, 0], argval=(6, 0), argrepr='.t4 <- i', offset=85, starts_line=None, is_jump_target=False),
Instruction(opname='CALL_FUNCTION', opcode=70, imm=[6, 1], argval=(6, 1), argrepr='.t4 to .t5', offset=88, starts_line=None, is_jump_target=False),
Instruction(opname='CLEAR_ACC', opcode=1, imm=[], argval=None, argrepr='', offset=92, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[8], argval=1, argrepr='1', offset=93, starts_line=13, is_jump_target=False),
Instruction(opname='INPLACE_SUBTRACT', opcode=40, imm=[0], argval=0, argrepr='i', offset=95, starts_line=None, is_jump_target=False),
Instruction(opname='STORE_FAST', opcode=58, imm=[0], argval=0, argrepr='i', offset=97, starts_line=None, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[6], argval=6, argrepr='6', offset=99, starts_line=14, is_jump_target=False),
Instruction(opname='COMPARE_OP', opcode=36, imm=[4, 0], argval=(4, 0), argrepr='4; i', offset=101, starts_line=None, is_jump_target=False),
Instruction(opname='POP_JUMP_IF_FALSE', opcode=83, imm=[6], argval=110, argrepr='to 110', offset=104, starts_line=None, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[14], argval=121, argrepr='to 121', offset=107, starts_line=15, is_jump_target=False),
Instruction(opname='LOAD_CONST', opcode=52, imm=[5], argval=4, argrepr='4', offset=110, starts_line=16, is_jump_target=True),
Instruction(opname='COMPARE_OP', opcode=36, imm=[0, 0], argval=(0, 0), argrepr='0; i', offset=112, starts_line=None, is_jump_target=False),
Instruction(opname='POP_JUMP_IF_FALSE', opcode=83, imm=[6], argval=121, argrepr='to 121', offset=115, starts_line=None, is_jump_target=False),
Instruction(opname='JUMP', opcode=79, imm=[22], argval=140, argrepr='to 140', offset=118, starts_line=17, is_jump_target=False),
    Instruction(opname='LOAD_FAST',
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_baremetal_node
----------------------------------
Tests for baremetal node related operations
"""
import uuid
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from openstack.cloud import exc
from openstack import exceptions
from openstack.tests import fakes
from openstack.tests.unit import base
class TestBaremetalNode(base.IronicTestCase):
def setUp(self):
super(TestBaremetalNode, self).setUp()
self.fake_baremetal_node = fakes.make_fake_machine(
self.name, self.uuid)
# TODO(TheJulia): Some tests below have fake ports,
# since they are required in some processes. Lets refactor
# them at some point to use self.fake_baremetal_port.
self.fake_baremetal_port = fakes.make_fake_port(
'00:01:02:03:04:05',
node_id=self.uuid)
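    # NOTE: the tests below share one pattern from the unit-test base class:
    # register_uris() queues the mocked HTTP responses the cloud layer is
    # expected to trigger, in order, and assert_calls() then verifies that
    # every registered request was actually made.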
def test_list_machines(self):
fake_baremetal_two = fakes.make_fake_machine('two', str(uuid.uuid4()))
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='nodes'),
json={'nodes': [self.fake_baremetal_node,
fake_baremetal_two]}),
])
machines = self.cloud.list_machines()
self.assertEqual(2, len(machines))
self.assertSubdict(self.fake_baremetal_node, machines[0])
self.assertSubdict(fake_baremetal_two, machines[1])
self.assert_calls()
def test_get_machine(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
machine = self.cloud.get_machine(self.fake_baremetal_node['uuid'])
self.assertEqual(machine['uuid'],
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_get_machine_by_mac(self):
mac_address = '00:01:02:03:04:05'
url_address = 'detail?address=%s' % mac_address
node_uuid = self.fake_baremetal_node['uuid']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=[url_address]),
json={'ports': [{'address': mac_address,
'node_uuid': node_uuid}]}),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
machine = self.cloud.get_machine_by_mac(mac_address)
self.assertEqual(machine['uuid'],
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine(self):
# NOTE(TheJulia): Note: These are only the interfaces
# that are validated, and all must be true for an
# exception to not be raised.
validate_return = {
'boot': {
'result': True,
},
'deploy': {
'result': True,
},
'management': {
'result': True,
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine_not_for_deploy(self):
validate_return = {
'deploy': {
'result': False,
'reason': 'Not ready',
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_machine(self.fake_baremetal_node['uuid'],
for_deploy=False)
self.assert_calls()
def test_deprecated_validate_node(self):
validate_return = {
'deploy': {
'result': True,
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_node(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine_raises_exception(self):
validate_return = {
'deploy': {
'result': False,
'reason': 'error!',
},
'power': {
'result': True,
'reason': None,
},
'foo': {
'result': True
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.assertRaises(
exceptions.ValidationException,
self.cloud.validate_machine,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_patch_machine(self):
test_patch = [{
'op': 'remove',
'path': '/instance_info'}]
self.fake_baremetal_node['instance_info'] = {}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
self.cloud.patch_machine(
self.fake_baremetal_node['uuid'], test_patch)
self.assert_calls()
def test_set_node_instance_info(self):
test_patch = [{
'op': 'add',
'path': '/foo',
'value': 'bar'}]
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
self.cloud.set_node_instance_info(
self.fake_baremetal_node['uuid'], test_patch)
self.assert_calls()
def test_purge_node_instance_info(self):
test_patch = [{
'op': 'remove',
'path': '/instance_info'}]
self.fake_baremetal_node['instance_info'] = {}
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
self.cloud.purge_node_instance_info(
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_fail_active(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True,
timeout=1)
self.assert_calls()
def test_inspect_machine_fail_associated(self):
self.fake_baremetal_node['provision_state'] = 'available'
self.fake_baremetal_node['instance_uuid'] = '1234'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaisesRegex(
exc.OpenStackCloudException,
'associated with an instance',
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True,
timeout=1)
self.assert_calls()
def test_inspect_machine_failed(self):
inspecting_node = self.fake_baremetal_node.copy()
self.fake_baremetal_node['provision_state'] = 'inspect failed'
self.fake_baremetal_node['last_error'] = 'kaboom!'
inspecting_node['provision_state'] = 'inspecting'
finished_node = self.fake_baremetal_node.copy()
finished_node['provision_state'] = 'manageable'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=finished_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_manageable(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_available(self):
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
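    # NOTE: inspecting an 'available' node appears to round-trip the provision
    # state (available -> manage -> inspect -> provide -> available), which is
    # why the mocked PUT/GET sequence above is so long.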
def test_inspect_machine_available_wait(self):
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
self.cloud.inspect_machine(
self.fake_baremetal_node['uuid'], wait=True, timeout=1)
self.assert_calls()
def test_inspect_machine_wait(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.inspect_machine(
self.fake_baremetal_node['uuid'], wait=True, timeout=1)
self.assert_calls()
def test_inspect_machine_inspect_failed(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
inspect_fail_node = self.fake_baremetal_node.copy()
inspect_fail_node['provision_state'] = 'inspect failed'
inspect_fail_node['last_error'] = 'Earth Imploded'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspect_fail_node),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True, timeout=1)
self.assert_calls()
    def test_set_machine_maintenance_state(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance']),
validate=dict(json={'reason': 'no reason'})),
])
self.cloud.set_machine_maintenance_state(
self.fake_baremetal_node['uuid'], True, reason='no reason')
self.assert_calls()
    def test_set_machine_maintenance_state_false(self):
self.register_uris([
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance'])),
])
self.cloud.set_machine_maintenance_state(
self.fake_baremetal_node['uuid'], False)
        self.assert_calls()
def test_remove_machine_from_maintenance(self):
self.register_uris([
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance'])),
])
self.cloud.remove_machine_from_maintenance(
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_set_machine_power_on(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
])
return_value = self.cloud.set_machine_power_on(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
    def test_set_machine_power_on_with_retries(self):
# NOTE(TheJulia): This logic ends up testing power on/off and reboot
# as they all utilize the same helper method.
self.register_uris([
dict(
method='PUT',
status_code=503,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
dict(
method='PUT',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
])
return_value = self.cloud.set_machine_power_on(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_off(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power off'})),
])
return_value = self.cloud.set_machine_power_off(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_reboot(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'rebooting'})),
])
return_value = self.cloud.set_machine_power_reboot(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_reboot_failure(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
status_code=400,
json={'error': 'invalid'},
validate=dict(json={'target': 'rebooting'})),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.set_machine_power_reboot,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_node_set_provision_state(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
configdrive='http://host/file')
self.assert_calls()
def test_node_set_provision_state_with_retries(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(
method='PUT',
status_code=503,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
configdrive='http://host/file')
self.assert_calls()
def test_node_set_provision_state_wait_timeout(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=deploy_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=active_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
wait=True)
self.assertSubdict(active_node, return_value)
self.assert_calls()
def test_node_set_provision_state_wait_timeout_fails(self):
# Intentionally time out.
self.fake_baremetal_node['provision_state'] = 'deploy wait'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.node_set_provision_state,
self.fake_baremetal_node['uuid'],
'active',
wait=True,
timeout=0.001)
self.assert_calls()
def test_node_set_provision_state_wait_success(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
wait=True)
self.assertSubdict(self.fake_baremetal_node, return_value)
self.assert_calls()
def test_node_set_provision_state_wait_failure_cases(self):
self.fake_baremetal_node['provision_state'] = 'foo failed'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
= params.get("Uid")
self.GroupId = params.get("GroupId")
class GroupInfo(AbstractModel):
"""用户组信息
"""
def __init__(self):
"""
        :param GroupId: User group ID.
        :type GroupId: int
        :param GroupName: User group name.
        :type GroupName: str
        :param CreateTime: Time at which the user group was created.
        :type CreateTime: str
        :param Remark: User group description.
        :type Remark: str
"""
self.GroupId = None
self.GroupName = None
self.CreateTime = None
self.Remark = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.CreateTime = params.get("CreateTime")
self.Remark = params.get("Remark")
class GroupMemberInfo(AbstractModel):
"""用户组用户信息
"""
def __init__(self):
"""
        :param Uid: Sub-user UID.
        :type Uid: int
        :param Uin: Sub-user UIN.
        :type Uin: int
        :param Name: Sub-user name.
        :type Name: str
        :param PhoneNum: Mobile number.
        :type PhoneNum: str
        :param CountryCode: Mobile number area code.
        :type CountryCode: str
        :param PhoneFlag: Whether the mobile number has been verified.
        :type PhoneFlag: int
        :param Email: Email address.
        :type Email: str
        :param EmailFlag: Whether the email address has been verified.
        :type EmailFlag: int
        :param UserType: User type.
        :type UserType: int
        :param CreateTime: Creation time.
        :type CreateTime: str
        :param IsReceiverOwner: Whether the user is the primary message recipient.
        :type IsReceiverOwner: int
"""
self.Uid = None
self.Uin = None
self.Name = None
self.PhoneNum = None
self.CountryCode = None
self.PhoneFlag = None
self.Email = None
self.EmailFlag = None
self.UserType = None
self.CreateTime = None
self.IsReceiverOwner = None
def _deserialize(self, params):
self.Uid = params.get("Uid")
self.Uin = params.get("Uin")
self.Name = params.get("Name")
self.PhoneNum = params.get("PhoneNum")
self.CountryCode = params.get("CountryCode")
self.PhoneFlag = params.get("PhoneFlag")
self.Email = params.get("Email")
self.EmailFlag = params.get("EmailFlag")
self.UserType = params.get("UserType")
self.CreateTime = params.get("CreateTime")
self.IsReceiverOwner = params.get("IsReceiverOwner")
class ListAttachedGroupPoliciesRequest(AbstractModel):
"""ListAttachedGroupPolicies请求参数结构体
"""
def __init__(self):
"""
        :param TargetGroupId: User group ID
        :type TargetGroupId: int
        :param Page: Page number; defaults to 1, starting from 1
        :type Page: int
        :param Rp: Number of entries per page; defaults to 20
        :type Rp: int
"""
self.TargetGroupId = None
self.Page = None
self.Rp = None
def _deserialize(self, params):
self.TargetGroupId = params.get("TargetGroupId")
self.Page = params.get("Page")
self.Rp = params.get("Rp")
class ListAttachedGroupPoliciesResponse(AbstractModel):
"""ListAttachedGroupPolicies返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of policies
        :type TotalNum: int
        :param List: Policy list
        :type List: list of AttachPolicyInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.List = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = AttachPolicyInfo()
obj._deserialize(item)
self.List.append(obj)
self.RequestId = params.get("RequestId")
class ListAttachedRolePoliciesRequest(AbstractModel):
"""ListAttachedRolePolicies请求参数结构体
"""
def __init__(self):
"""
        :param Page: Page number, starting from 1
        :type Page: int
        :param Rp: Number of entries per page, no more than 200
        :type Rp: int
        :param RoleId: Role ID, used to specify a role. Provide either RoleId or RoleName as input.
        :type RoleId: str
        :param RoleName: Role name, used to specify a role. Provide either RoleId or RoleName as input.
        :type RoleName: str
        :param PolicyType: Filter by policy type. User: query only custom policies; QCS: query only preset policies
        :type PolicyType: str
"""
self.Page = None
self.Rp = None
self.RoleId = None
self.RoleName = None
self.PolicyType = None
def _deserialize(self, params):
self.Page = params.get("Page")
self.Rp = params.get("Rp")
self.RoleId = params.get("RoleId")
self.RoleName = params.get("RoleName")
self.PolicyType = params.get("PolicyType")
class ListAttachedRolePoliciesResponse(AbstractModel):
"""ListAttachedRolePolicies返回参数结构体
"""
def __init__(self):
"""
        :param List: List of policies attached to the role
        :type List: list of AttachedPolicyOfRole
        :param TotalNum: Total number of policies attached to the role
        :type TotalNum: int
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.List = None
self.TotalNum = None
self.RequestId = None
def _deserialize(self, params):
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = AttachedPolicyOfRole()
obj._deserialize(item)
self.List.append(obj)
self.TotalNum = params.get("TotalNum")
self.RequestId = params.get("RequestId")
class ListAttachedUserPoliciesRequest(AbstractModel):
"""ListAttachedUserPolicies请求参数结构体
"""
def __init__(self):
"""
        :param TargetUin: Sub-account UIN
        :type TargetUin: int
        :param Page: Page number; defaults to 1, starting from 1
        :type Page: int
        :param Rp: Number of entries per page; defaults to 20
        :type Rp: int
"""
self.TargetUin = None
self.Page = None
self.Rp = None
def _deserialize(self, params):
self.TargetUin = params.get("TargetUin")
self.Page = params.get("Page")
self.Rp = params.get("Rp")
class ListAttachedUserPoliciesResponse(AbstractModel):
"""ListAttachedUserPolicies返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of policies
        :type TotalNum: int
        :param List: Policy list
        :type List: list of AttachPolicyInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.List = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = AttachPolicyInfo()
obj._deserialize(item)
self.List.append(obj)
self.RequestId = params.get("RequestId")
class ListEntitiesForPolicyRequest(AbstractModel):
"""ListEntitiesForPolicy请求参数结构体
"""
def __init__(self):
"""
        :param PolicyId: Policy ID
        :type PolicyId: int
        :param Page: Page number; defaults to 1, starting from 1
        :type Page: int
        :param Rp: Number of entries per page; defaults to 20
        :type Rp: int
        :param EntityFilter: Valid values: 'All', 'User', 'Group' and 'Role'. 'All' returns all entity types, 'User' only sub-accounts, 'Group' only user groups, and 'Role' only roles. Defaults to 'All'
        :type EntityFilter: str
"""
self.PolicyId = None
self.Page = None
self.Rp = None
self.EntityFilter = None
def _deserialize(self, params):
self.PolicyId = params.get("PolicyId")
self.Page = params.get("Page")
self.Rp = params.get("Rp")
self.EntityFilter = params.get("EntityFilter")
class ListEntitiesForPolicyResponse(AbstractModel):
"""ListEntitiesForPolicy返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of entities
Note: this field may return null, indicating that no valid value was obtained.
        :type TotalNum: int
        :param List: Entity list
Note: this field may return null, indicating that no valid value was obtained.
        :type List: list of AttachEntityOfPolicy
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.List = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = AttachEntityOfPolicy()
obj._deserialize(item)
self.List.append(obj)
self.RequestId = params.get("RequestId")
class ListGroupsForUserRequest(AbstractModel):
"""ListGroupsForUser请求参数结构体
"""
def __init__(self):
"""
        :param Uid: Sub-user UID
        :type Uid: int
        :param Rp: Number of entries per page; defaults to 20.
        :type Rp: int
        :param Page: Page number; defaults to 1.
        :type Page: int
"""
self.Uid = None
self.Rp = None
self.Page = None
def _deserialize(self, params):
self.Uid = params.get("Uid")
self.Rp = params.get("Rp")
self.Page = params.get("Page")
class ListGroupsForUserResponse(AbstractModel):
"""ListGroupsForUser返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of user groups the sub-user belongs to
        :type TotalNum: int
        :param GroupInfo: User group information
        :type GroupInfo: list of GroupInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.GroupInfo = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("GroupInfo") is not None:
self.GroupInfo = []
for item in params.get("GroupInfo"):
obj = GroupInfo()
obj._deserialize(item)
self.GroupInfo.append(obj)
self.RequestId = params.get("RequestId")
class ListGroupsRequest(AbstractModel):
"""ListGroups请求参数结构体
"""
def __init__(self):
"""
        :param Page: Page number; defaults to 1.
        :type Page: int
        :param Rp: Number of entries per page; defaults to 20.
        :type Rp: int
        :param Keyword: Filter by user group name.
        :type Keyword: str
"""
self.Page = None
self.Rp = None
self.Keyword = None
def _deserialize(self, params):
self.Page = params.get("Page")
self.Rp = params.get("Rp")
self.Keyword = params.get("Keyword")
class ListGroupsResponse(AbstractModel):
"""ListGroups返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of user groups.
        :type TotalNum: int
        :param GroupInfo: Array of user group information.
        :type GroupInfo: list of GroupInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.GroupInfo = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("GroupInfo") is not None:
self.GroupInfo = []
for item in params.get("GroupInfo"):
obj = GroupInfo()
obj._deserialize(item)
self.GroupInfo.append(obj)
self.RequestId = params.get("RequestId")
class ListPoliciesRequest(AbstractModel):
"""ListPolicies请求参数结构体
"""
def __init__(self):
"""
        :param Rp: Number of entries per page; defaults to 20. Must be greater than 0 and less than or equal to 200
        :type Rp: int
        :param Page: Page number; defaults to 1, starting from 1, and must not exceed 200
        :type Page: int
        :param Scope: Valid values: 'All', 'QCS' and 'Local'. 'All' returns all policies, 'QCS' only preset policies, and 'Local' only custom policies. Defaults to 'All'
        :type Scope: str
        :param Keyword: Filter by policy name
        :type Keyword: str
"""
self.Rp = None
self.Page = None
self.Scope = None
self.Keyword = None
def _deserialize(self, params):
self.Rp = params.get("Rp")
self.Page = params.get("Page")
self.Scope = params.get("Scope")
self.Keyword = params.get("Keyword")
class ListPoliciesResponse(AbstractModel):
"""ListPolicies返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of policies
        :type TotalNum: int
        :param List: Policy array. Each array member contains the fields policyId, policyName, addTime, type, description and createMode, where:
policyId: policy ID
policyName: policy name
addTime: policy creation time
type: 1 means custom policy, 2 means preset policy
description: policy description
createMode: 1 means the policy was created from business permissions; any other value means the policy syntax can be viewed and the policy can be updated through the policy syntax
Attachments: number of associated users
ServiceType: the product the policy is associated with
IsAttached: not null when the query marks whether the entity has the policy attached; 0 means not attached, 1 means attached
        :type List: list of StrategyInfo
        :param ServiceTypeList: Reserved field
Note: this field may return null, indicating that no valid value was obtained.
        :type ServiceTypeList: list of str
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.List = None
self.ServiceTypeList = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("List") is not None:
self.List = []
for item in params.get("List"):
obj = StrategyInfo()
obj._deserialize(item)
self.List.append(obj)
self.ServiceTypeList = params.get("ServiceTypeList")
self.RequestId = params.get("RequestId")
class ListSAMLProvidersRequest(AbstractModel):
"""ListSAMLProviders请求参数结构体
"""
class ListSAMLProvidersResponse(AbstractModel):
"""ListSAMLProviders返回参数结构体
"""
def __init__(self):
"""
        :param TotalCount: Total number of SAML identity providers
        :type TotalCount: int
        :param SAMLProviderSet: List of SAML identity providers
        :type SAMLProviderSet: list of SAMLProviderInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalCount = None
self.SAMLProviderSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("SAMLProviderSet") is not None:
self.SAMLProviderSet = []
for item in params.get("SAMLProviderSet"):
obj = SAMLProviderInfo()
obj._deserialize(item)
self.SAMLProviderSet.append(obj)
self.RequestId = params.get("RequestId")
class ListUsersForGroupRequest(AbstractModel):
"""ListUsersForGroup请求参数结构体
"""
def __init__(self):
"""
        :param GroupId: User group ID.
        :type GroupId: int
        :param Page: Page number; defaults to 1.
        :type Page: int
        :param Rp: Number of entries per page; defaults to 20.
        :type Rp: int
"""
self.GroupId = None
self.Page = None
self.Rp = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.Page = params.get("Page")
self.Rp = params.get("Rp")
class ListUsersForGroupResponse(AbstractModel):
"""ListUsersForGroup返回参数结构体
"""
def __init__(self):
"""
        :param TotalNum: Total number of users associated with the user group.
        :type TotalNum: int
        :param UserInfo: Sub-user information.
        :type UserInfo: list of GroupMemberInfo
        :param RequestId: Unique request ID, which is returned for each request. The RequestId is required for locating a problem.
        :type RequestId: str
"""
self.TotalNum = None
self.UserInfo = None
self.RequestId = None
def _deserialize(self, params):
self.TotalNum = params.get("TotalNum")
if params.get("UserInfo") is not None:
self.UserInfo = []
for item in params.get("UserInfo"):
obj = GroupMemberInfo()
obj._deserialize(item)
self.UserInfo.append(obj)
        self.RequestId = params.get("RequestId")
float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -15,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_200716_c4_LDECE_OP_VC_clear_nointerval_2
# prominence (100-200), peak (-150-0)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
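# Note on the QC bounds used throughout this script: each spikesQC call
# presumably keeps only the detected spikes whose metrics listed in `filter_by`
# fall inside the corresponding [min, max] window; a bound of float('-inf') or
# float('inf') leaves that side of the criterion effectively unrestricted.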
# dmpag_vglut2_200716_c5_LDECF_OP_VC_clear_nointerval_1
# prominence (100-400), peak (-400-0)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_200716_c6_LDECG_OP_VC_clear_nointerval_1
# prominence (25-50), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -20,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_200716_c7_LDECH_OP_VC_clear_nointerval_1
# prominence (24-50), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -20,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_200716_c8_LDECI_OP_VC_clear_nointerval_1
# prominence (30-100), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_201116_c6_LDECL_OP_VC_clear_nointerval_1
# prominence (75-350), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = 300,
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = 20,
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = 0,
QC_lb_min = 7, QC_lb_max = float('inf'),
QC_rb_min = 7, QC_rb_max = float('inf')
)
# dmpag_vglut2_201116_c7_LDECM_OP_VC_clear_nointerval_1
# prominence (50-200), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 3, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = 5, QC_lb_max = float('inf'),
QC_rb_min = 5, QC_rb_max = float('inf')
)
# dmpag_vglut2_201116_c9_LDECO_OP_VC_clear_nointerval_1
# prominence (35-80), peak (-100-0)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = 20,
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = -80, QC_pb_max = -20,
QC_lb_min = 4, QC_lb_max = float('inf'),
QC_rb_min = 5, QC_rb_max = float('inf')
)
# dmpag_vglut2_201117_c6_LDECS_OP_VC_clear_nointerval_1
# prominence (75-150), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -10,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_201119_c10_LDEDB_OP_VC_clear_nointerval_1
# prominence (40-200), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -10,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# dmpag_vglut2_201119_c7_LDECY_OP_VC_clear_nointerval_1
# prominence (75-250), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 5, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -30,
QC_lb_min = 5, QC_lb_max = float('inf'),
QC_rb_min = 5, QC_rb_max = float('inf')
)
# dmpag_vglut2_201120_c9_LDEDG_OP_VC_clear_nointerval_1
# prominence (35-200), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 7, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
##### lpag #####
# lpag_vglut2_190115_c2_LDEBC_OP_VC_clear_nointerval_1
# prominence (25, 50), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = float('-inf'), QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -30,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# lpag_vglut2_190117_c2_LDEBK_OP_VC_clear_nointerval_1
# prominence (30, 300), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 5, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = -200, QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# lpag_vglut2_190117_c4_LDEBM_OP_VC_clear_nointerval_1
# prominence (28, 100), peak (-100, 0)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 4, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = float('inf'),
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# lpag_vglut2_190208_c1_LDEBR_OP_VC_clear_nointerval_2
# prominence (30, 100), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 5, QC_pw_max = 20,
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -25,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# lpag_vglut2_201117_c11_LDECX_OP_VC_clear_nointerval_2
# prominence (25, 50), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 10, QC_pw_max = float('inf'),
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = -20,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
QC_rb_min = float('-inf'), QC_rb_max = float('inf')
)
# lpag_vglut2_201117_c8_LDECU_OP_VC_clear_nointerval_1
# prominence (170, 200), peak (NA)
peaks_QC, cut_spikes_QC, cut_spikes_holding_QC, cut_spikes_baselined_QC, parameters_QC = spikesQC(
file_name, peaks, peaks_properties,
cut_spikes, cut_spikes_holding, cut_spikes_baselined,
filter_by = ['p', 'wh', 'pw', 'ph', 'pb', 'lb', 'rb'],
QC_p_min = float('-inf'), QC_p_max = float('inf'),
QC_wh_min = float('-inf'), QC_wh_max = float('inf'),
QC_pw_min = 5, QC_pw_max = 20,
QC_ph_min = float('-inf'), QC_ph_max = float('inf'),
QC_pb_min = float('-inf'), QC_pb_max = 0,
QC_lb_min = float('-inf'), QC_lb_max = float('inf'),
    QC_rb_min = float('-inf'), QC_rb_max = float('inf')
    )
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import copy
import warnings
import numpy as np
import pytest
import pandapower as pp
import pandapower.networks as nw
from pandapower.diagnostic_reports import DiagnosticReports
try:
import numba
numba_installed = True
except ImportError:
numba_installed = False
@pytest.fixture(scope='module')
def test_net():
net = nw.example_multivoltage()
return net
@pytest.fixture(scope='module')
def diag_params():
diag_params = {
"overload_scaling_factor": 0.001,
"lines_min_length_km": 0,
"lines_min_z_ohm": 0,
"nom_voltage_tolerance": 0.3,
"numba_tolerance": 1e-5}
return diag_params
@pytest.fixture(scope='module')
def report_methods():
report_methods = {
"missing_bus_indeces": "diag_report.report_missing_bus_indeces()",
"disconnected_elements": "diag_report.report_disconnected_elements()",
"different_voltage_levels_connected": "diag_report.report_different_voltage_levels_connected()",
"lines_with_impedance_close_to_zero": "diag_report.report_lines_with_impedance_close_to_zero()",
"nominal_voltages_dont_match": "diag_report.report_nominal_voltages_dont_match()",
"invalid_values": "diag_report.report_invalid_values()",
"overload": "diag_report.report_overload()",
"multiple_voltage_controlling_elements_per_bus" : "diag_report.report_multiple_voltage_controlling_elements_per_bus()",
"wrong_switch_configuration": "diag_report.report_wrong_switch_configuration()",
"no_ext_grid": "diag_report.report_no_ext_grid()",
"wrong_reference_system": "diag_report.report_wrong_reference_system()",
"deviation_from_std_type": "diag_report.report_deviation_from_std_type()",
"numba_comparison": "diag_report.report_numba_comparison()",
"parallel_switches": "diag_report.report_parallel_switches()"}
return report_methods
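# The fixture above maps each diagnostic check name to its report call as a
# string; the tests evaluate it with eval() against a DiagnosticReports
# instance bound to the name `diag_report`, so a report method that raises
# marks the corresponding check as failed.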
def test_no_issues(diag_params, report_methods):
net = nw.example_simple()
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
diag_results = pp.diagnostic(net, report_style=None)
assert diag_results == {}
for bool_value in [True, False]:
for check_function in report_methods.keys():
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
class TestInvalidValues:
def test_greater_zero(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.bus.loc[42, 'vn_kv'] = '-1'
net.line.loc[7, 'length_km'] = -1
net.line.loc[8, 'max_i_ka'] = 0
net.trafo.loc[0, 'vsc_percent'] = 0.0
net.trafo.loc[0, 'sn_kva'] = None
net.trafo.loc[0, 'vn_hv_kv'] = -1.5
net.trafo.loc[0, 'vn_lv_kv'] = False
net.trafo3w.loc[0, 'vsc_hv_percent'] = 2.3
net.trafo3w.loc[0, 'vsc_mv_percent'] = np.nan
net.trafo3w.loc[0, 'vsc_lv_percent'] = 0.0
net.trafo3w.loc[0, 'sn_hv_kva'] = 11
net.trafo3w.loc[0, 'sn_mv_kva'] = 'a'
net.trafo3w.loc[0, 'vn_hv_kv'] = -1.5
net.trafo3w.loc[0, 'vn_mv_kv'] = -1.5
net.trafo3w.loc[0, 'vn_lv_kv'] = False
net.ext_grid.loc[0, 'vm_pu'] = True
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'bus': [(42, 'vn_kv', '-1', '>0')],
'ext_grid': [(0, 'vm_pu', True, '>0')],
'line': [(7, 'length_km', -1.0, '>0'), (8, 'max_i_ka', 0.0, '>0')],
'trafo': [(0, 'sn_kva', 'nan', '>0'), (0, 'vn_hv_kv', -1.5, '>0'),
(0, 'vn_lv_kv', False, '>0'), (0, 'vsc_percent', 0.0, '>0')],
'trafo3w': [(0, 'sn_mv_kva', 'a', '>0'), (0, 'vn_hv_kv', -1.5, '>0'),
(0, 'vn_mv_kv', -1.5, '>0'), (0, 'vn_lv_kv', False, '>0'),
(0, 'vsc_mv_percent', 'nan', '>0'), (0, 'vsc_lv_percent', 0.0, '>0')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_greater_equal_zero(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.line.loc[7, 'r_ohm_per_km'] = -1
net.line.loc[8, 'x_ohm_per_km'] = None
net.line.loc[8, 'c_nf_per_km'] = '0'
net.trafo.loc[0, 'vscr_percent'] = '-1'
net.trafo.loc[0, 'pfe_kw'] = -1.5
net.trafo.loc[0, 'i0_percent'] = -0.001
net.trafo3w.loc[0, 'vscr_hv_percent'] = True
net.trafo3w.loc[0, 'vscr_mv_percent'] = False
net.trafo3w.loc[0, 'vscr_lv_percent'] = 1
net.trafo3w.loc[0, 'pfe_kw'] = '2'
net.trafo3w.loc[0, 'i0_percent'] = 10
net.load.loc[0, 'scaling'] = -0.1
net.load.loc[1, 'scaling'] = 0
net.load.loc[2, 'scaling'] = 1
net.load.loc[3, 'scaling'] = '1'
net.gen.loc[0, 'scaling'] = None
net.sgen.loc[0, 'scaling'] = False
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'line': [(7, 'r_ohm_per_km', -1.0, '>=0'), (8, 'x_ohm_per_km', 'nan', '>=0'),
(8, 'c_nf_per_km', '0', '>=0')],
'trafo': [(0, 'vscr_percent', '-1', '>=0'), (0, 'pfe_kw', -1.5, '>=0'),
(0, 'i0_percent', -0.001, '>=0')],
'trafo3w': [(0, 'vscr_hv_percent', True, '>=0'), (0, 'vscr_mv_percent', False, '>=0'),
(0, 'pfe_kw', '2', '>=0')],
'gen': [(0, 'scaling', 'nan', '>=0')],
'load': [(0, 'scaling', -0.1, '>=0'), (3, 'scaling', '1', '>=0')],
'sgen': [(0, 'scaling', False, '>=0')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
# def test_smaller_zero(self, net):  # check_smaller_zero currently not in use
#     pass
# def test_smaller_equal_zero(self, net):  # check_smaller_equal_zero currently not in use
#     pass
def test_boolean(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.sgen.loc[0, 'in_service'] = 0
net.sgen.loc[1, 'in_service'] = 0.0
net.sgen.loc[2, 'in_service'] = '0'
net.sgen.loc[3, 'in_service'] = '0.0'
net.sgen.loc[4, 'in_service'] = 1
net.gen.loc[0, 'in_service'] = '1'
net.load.loc[0, 'in_service'] = 10
net.line.loc[0, 'in_service'] = -1
net.bus.loc[0, 'in_service'] = 'no'
net.trafo.loc[0, 'in_service'] = 'True'
net.trafo3w.loc[0, 'in_service'] = None
net.switch.loc[0, 'closed'] = 0
net.switch.loc[1, 'closed'] = 'False'
net.switch.loc[2, 'closed'] = False
net.switch.loc[3, 'closed'] = 'False'
net.switch.loc[4, 'closed'] = None
net.switch.loc[5, 'closed'] = 10
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'bus': [(0, 'in_service', 'no', 'boolean')],
'gen': [(0, 'in_service', '1', 'boolean')],
'sgen': [(2, 'in_service', '0', 'boolean'), (3, 'in_service', '0.0', 'boolean')],
'switch': [(1, 'closed', 'False', 'boolean'), (3, 'closed', 'False', 'boolean'),
(4, 'closed', 'None', 'boolean'), (5, 'closed', 10, 'boolean')],
'trafo': [(0, 'in_service', 'True', 'boolean')],
'trafo3w': [(0, 'in_service', 'nan', 'boolean')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_pos_int(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.line.loc[7, 'from_bus'] = 1
net.line.loc[8, 'to_bus'] = '2'
net.trafo.loc[0, 'hv_bus'] = False
net.trafo.loc[0, 'lv_bus'] = None
net.trafo3w.loc[0, 'hv_bus'] = False
net.trafo3w.loc[0, 'mv_bus'] = 0.5
net.trafo3w.loc[0, 'lv_bus'] = 2
net.load.loc[0, 'bus'] = True
net.sgen.loc[0, 'bus'] = 1.5
net.gen.loc[0, 'bus'] = np.nan
net.ext_grid.loc[0, 'bus'] = -2.5
net.switch.loc[0, 'bus'] = None
net.switch.loc[0, 'element'] = -1.5
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'ext_grid': [(0, 'bus', -2.5, 'positive_integer')],
'gen': [(0, 'bus', 'nan', 'positive_integer')],
'line': [(8, 'to_bus', '2', 'positive_integer')],
'load': [(0, 'bus', True, 'positive_integer')],
'sgen': [(0, 'bus', 1.5, 'positive_integer')],
'switch': [(0, 'bus', 'nan', 'positive_integer'),
(0, 'element', -1.5, 'positive_integer')],
'trafo': [(0, 'hv_bus', False, 'positive_integer'),
(0, 'lv_bus', 'nan', 'positive_integer')],
'trafo3w': [(0, 'hv_bus', False, 'positive_integer'),
(0, 'mv_bus', 0.5, 'positive_integer')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_number(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.load.loc[0, 'p_kw'] = '1000'
net.load.loc[1, 'q_kvar'] = None
net.gen.loc[0, 'p_kw'] = False
net.sgen.loc[0, 'p_kw'] = -1.5
net.sgen.loc[1, 'q_kvar'] = np.nan
net.ext_grid.loc[0, 'va_degree'] = 13.55
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'gen': [(0, 'p_kw', False, 'number')],
'load': [(0, 'p_kw', '1000', 'number'), (1, 'q_kvar', 'nan', 'number')],
'sgen': [(1, 'q_kvar', 'nan', 'number')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_between_zero_and_one(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.line.loc[0, 'df'] = 1.5
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'line': [(0, 'df', 1.5, '0to1')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_switch_type(self, test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'invalid_values'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.switch.loc[0, 'et'] = 'bus'
net.switch.loc[1, 'et'] = 1
net.switch.loc[2, 'et'] = None
net.switch.loc[3, 'et'] = True
net.switch.loc[4, 'et'] = 't'
check_result = pp.invalid_values(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'switch': [(0, 'et', 'bus', 'switch_type'),
(1, 'et', 1, 'switch_type'),
(2, 'et', 'None', 'switch_type'),
(3, 'et', True, 'switch_type')]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_no_ext_grid(test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'no_ext_grid'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.ext_grid = net.ext_grid.drop(0)
check_result = pp.no_ext_grid(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == True
def test_multiple_voltage_controlling_elements_per_bus(test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'multiple_voltage_controlling_elements_per_bus'
diag_params = copy.deepcopy(diag_params)
report_methods = copy.deepcopy(report_methods)
net.gen.bus.at[0] = 0
pp.create_ext_grid(net, 1)
net.ext_grid.bus.at[1] = 0
check_result = pp.multiple_voltage_controlling_elements_per_bus(net)
if check_result:
diag_results = {check_function: check_result}
else:
diag_results = {}
assert diag_results[check_function] == \
{'buses_with_gens_and_ext_grids': [0],
'buses_with_mult_ext_grids': [0]}
for bool_value in [True, False]:
diag_report = DiagnosticReports(net, diag_results, diag_params, compact_report=bool_value)
report_check = None
try:
eval(report_methods[check_function])
report_check = True
except:
report_check = False
assert report_check
def test_overload(test_net, diag_params, report_methods):
net = copy.deepcopy(test_net)
check_function = 'overload'
diag_params | |
# --------------------------------------------------------------------------- #
# SPEEDMETER Control wxPython IMPLEMENTATION
# Python Code By:
#
# <NAME>, @ 25 Sep 2005
# Latest Revision: 27 Dec 2012, 21.00 GMT
#
#
# TODO List/Caveats
#
# 1. Combination Of The Two Styles:
#
# SM_DRAW_PARTIAL_FILLER
# SM_DRAW_SECTORS
#
# Does Not Work Very Well. It Works Well Only In Case When The Sector Colours
# Are The Same For All Intervals.
#
#
# Thanks To <NAME>, Who Tried The Demo On MacOS, I Corrected A
# Bug On Line 246
#
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# <EMAIL>
# <EMAIL>
#
# Or, Obviously, To The wxPython Mailing List!!!
#
# Tags: phoenix-port, unittest, documented, py3-port
#
# End Of Comments
# --------------------------------------------------------------------------- #
"""
:class:`~wx.lib.agw.speedmeter.SpeedMeter` tries to reproduce the behavior of some car controls (but not only),
by creating an "angular" control (actually, circular).
Description
===========
:class:`SpeedMeter` tries to reproduce the behavior of some car controls (but not only),
by creating an "angular" control (actually, circular). I remember having seen
it somewhere, and I decided to implement it in wxPython.
:class:`SpeedMeter` starts its construction from an empty bitmap, and it uses some
functions of the :class:`wx.DC` class to create the rounded effects. Everything is
processed in the `Draw()` method of the :class:`SpeedMeter` class.
This implementation allows you to use either the :class:`PaintDC` directly, or the
better (for me) double buffered style with :class:`BufferedPaintDC`. The double
buffered implementation has been adapted from the wxPython wiki example:
http://wiki.wxpython.org/index.cgi/doublebuffereddrawing
Usage
=====
Usage example::
import wx
from math import pi
import wx.lib.agw.speedmeter as SM
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "SpeedMeter Demo")
speed = SM.SpeedMeter(self, agwStyle=SM.SM_DRAW_HAND|SM.SM_DRAW_SECTORS|SM.SM_DRAW_MIDDLE_TEXT|SM.SM_DRAW_SECONDARY_TICKS)
# Set The Region Of Existence Of SpeedMeter (Always In Radians!!!!)
speed.SetAngleRange(-pi/6, 7*pi/6)
# Create The Intervals That Will Divide Our SpeedMeter In Sectors
intervals = range(0, 201, 20)
speed.SetIntervals(intervals)
# Assign The Same Colours To All Sectors (We Simulate A Car Control For Speed)
# Usually This Is Black
colours = [wx.BLACK]*10
speed.SetIntervalColours(colours)
# Assign The Ticks: Here They Are Simply The String Equivalent Of The Intervals
ticks = [str(interval) for interval in intervals]
speed.SetTicks(ticks)
# Set The Ticks/Tick Markers Colour
speed.SetTicksColour(wx.WHITE)
# We Want To Draw 5 Secondary Ticks Between The Principal Ticks
speed.SetNumberOfSecondaryTicks(5)
# Set The Font For The Ticks Markers
speed.SetTicksFont(wx.Font(7, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
# Set The Text In The Center Of SpeedMeter
speed.SetMiddleText("Km/h")
# Assign The Colour To The Center Text
speed.SetMiddleTextColour(wx.WHITE)
# Assign A Font To The Center Text
speed.SetMiddleTextFont(wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))
# Set The Colour For The Hand Indicator
speed.SetHandColour(wx.Colour(255, 50, 0))
# Do Not Draw The External (Container) Arc. Drawing The External Arc May
# Sometimes Create Uglier Controls. Try To Comment This Line And See It
# For Yourself!
speed.DrawExternalArc(False)
# Set The Current Value For The SpeedMeter
speed.SetSpeedValue(44)
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Methods and Settings
====================
:class:`SpeedMeter` is highly customizable, and in particular you can set:
- The start and end angle of existence for :class:`SpeedMeter`;
- The intervals in which you divide the :class:`SpeedMeter` (numerical values);
- The corresponding ticks for the intervals;
- The interval colours (different intervals may have different filling colours);
- The ticks font and colour;
- The background colour (outside the :class:`SpeedMeter` region);
- The external arc colour;
- The hand (arrow) colour;
- The hand's shadow colour;
- The hand's style ("arrow" or "hand");
- The partial filler colour;
- The number of secondary (intermediate) ticks;
- The direction of increasing speed ("advance" or "reverse");
- The text to be drawn in the middle and its font;
- The icon to be drawn in the middle;
- The first and second gradient colours (that fills the :class:`SpeedMeter` control);
- The current value.
Window Styles
=============
This class supports the following window styles:
=========================== =========== ==================================================
Window Styles Hex Value Description
=========================== =========== ==================================================
``SM_ROTATE_TEXT`` 0x1 Draws the ticks rotated: the ticks are rotated accordingly to the tick marks positions.
``SM_DRAW_SECTORS``         0x2         Different intervals are painted in different colours (every sector of the circle has its own colour).
``SM_DRAW_PARTIAL_SECTORS`` 0x4 Every interval has its own colour, but only a circle corona is painted near the ticks.
``SM_DRAW_HAND`` 0x8 The hand (arrow indicator) is drawn.
``SM_DRAW_SHADOW`` 0x10 A shadow for the hand is drawn.
``SM_DRAW_PARTIAL_FILLER`` 0x20 A circle corona that follows the hand position is drawn near the ticks.
``SM_DRAW_SECONDARY_TICKS`` 0x40 Intermediate (smaller) ticks are drawn between principal ticks.
``SM_DRAW_MIDDLE_TEXT`` 0x80 Some text is printed in the middle of the control near the center.
``SM_DRAW_MIDDLE_ICON`` 0x100 An icon is drawn in the middle of the control near the center.
``SM_DRAW_GRADIENT`` 0x200 A gradient of colours will fill the control.
``SM_DRAW_FANCY_TICKS`` 0x400 With this style you can use xml tags to create some custom text and draw it at the ticks position. See :mod:`lib.fancytext` for the tags.
=========================== =========== ==================================================
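The styles can be combined with the usual bitwise OR, as already done in the usage example above, e.g. ``SM.SM_DRAW_HAND | SM.SM_DRAW_SECTORS | SM.SM_DRAW_MIDDLE_TEXT``.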
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
:class:`SpeedMeter` is distributed under the wxPython license.
Latest revision: <NAME> @ 27 Dec 2012, 21.00 GMT
Version 0.3
"""
#----------------------------------------------------------------------
# Beginning Of SPEEDMETER wxPython Code
#----------------------------------------------------------------------
import wx
import wx.lib.colourdb
import wx.lib.fancytext as fancytext
from math import pi, sin, cos, log, sqrt, atan2
#----------------------------------------------------------------------
# DC Drawing Options
#----------------------------------------------------------------------
# SM_NORMAL_DC Uses The Normal wx.PaintDC
# SM_BUFFERED_DC Uses The Double Buffered Drawing Style
SM_NORMAL_DC = 0
""" Uses the normal :class:`PaintDC`. """
SM_BUFFERED_DC = 1
""" Uses a double buffered drawing code. """
#----------------------------------------------------------------------
# SpeedMeter Styles
#----------------------------------------------------------------------
# SM_ROTATE_TEXT: Draws The Ticks Rotated: The Ticks Are Rotated
# Accordingly To The Tick Marks Positions
# SM_DRAW_SECTORS: Different Intervals Are Painted In Differend Colours
# (Every Sector Of The Circle Has Its Own Colour)
# SM_DRAW_PARTIAL_SECTORS: Every Interval Has Its Own Colour, But Only
# A Circle Corona Is Painted Near The Ticks
# SM_DRAW_HAND: The Hand (Arrow Indicator) Is Drawn
# SM_DRAW_SHADOW: A Shadow For The Hand Is Drawn
# SM_DRAW_PARTIAL_FILLER: A Circle Corona That Follows The Hand Position
# Is Drawn Near The Ticks
# SM_DRAW_SECONDARY_TICKS: Intermediate (Smaller) Ticks Are Drawn Between
# Principal Ticks
# SM_DRAW_MIDDLE_TEXT: Some Text Is Printed In The Middle Of The Control
# Near The Center
# SM_DRAW_MIDDLE_ICON: An Icon Is Drawn In The Middle Of The Control Near
# The Center
# SM_DRAW_GRADIENT: A Gradient Of Colours Will Fill The Control
# SM_DRAW_FANCY_TICKS: With This Style You Can Use XML Tags To Create
# Some Custom Text And Draw It At The Ticks Position.
# See wx.lib.fancytext For The Tags.
SM_ROTATE_TEXT = 1
""" Draws the ticks rotated: the ticks are rotated accordingly to the tick marks positions. """
SM_DRAW_SECTORS = 2
""" Different intervals are painted in differend colours (every sector of the circle has its own colour). """
SM_DRAW_PARTIAL_SECTORS = 4
""" Every interval has its own colour, but only a circle corona is painted near the ticks. """
SM_DRAW_HAND = 8
""" The hand (arrow indicator) is drawn. """
SM_DRAW_SHADOW = 16
""" A shadow for the hand is drawn. """
SM_DRAW_PARTIAL_FILLER = 32
""" A circle corona that follows the hand position is drawn near the ticks. """
SM_DRAW_SECONDARY_TICKS = 64
""" Intermediate (smaller) ticks are drawn between principal ticks. """
SM_DRAW_MIDDLE_TEXT = 128
""" Some text is printed in the middle of the control near the center. """
SM_DRAW_MIDDLE_ICON = 256
""" An icon is drawn in the middle of the control near the center. """
SM_DRAW_GRADIENT = 512
""" A gradient of colours will fill the control. """
SM_DRAW_FANCY_TICKS = 1024
""" With this style you can use xml tags to create some custom text and draw it at the ticks position. See :mod:`lib.fancytext` for the tags. """
#----------------------------------------------------------------------
# Event Binding
#----------------------------------------------------------------------
# SM_MOUSE_TRACK: The Mouse Left Click/Drag Allow You To Change The
# SpeedMeter Value Interactively
SM_MOUSE_TRACK = 1
""" Flag to allow the left/right click of the mouse to change the :class:`SpeedMeter` value interactively. """
fontfamily = list(range(70, 78))
familyname = ["default", "decorative", "roman", "script", "swiss", "modern", "teletype"]
weights = list(range(90, 93))
weightsname = ["normal", "light", "bold"]
styles = [90, 93, 94]
stylesname = ["normal", "italic", "slant"]
#----------------------------------------------------------------------
# BUFFERENDWINDOW Class
# This Class Has Been Taken From The wxPython Wiki, And Slightly
# Adapted To Fill My Needs. See:
#
# http://wiki.wxpython.org/index.cgi/DoubleBufferedDrawing
#
# For More Info About DC And Double Buffered Drawing.
#----------------------------------------------------------------------
class BufferedWindow(wx.Window):
"""
A buffered window class.
To use it, subclass it and define a `Draw(dc)` method that takes a `dc`
to draw to. In that method, put the code needed to draw the picture
you want. The window will automatically be double buffered, and the
screen will be automatically updated when a Paint event is received.
When the drawing needs to | |
#!/usr/bin/env python3
# coding=utf-8
from itertools import cycle
from statistics import mean
from typing import Iterable
from .core import *
from ..simulator import Sim, SchedulingFailure
class SFF(object):
@Sim.register_reset_global_fields
class Props:
def __init__(self):
self.counter_packets_put_on_wire: int = 0
self.consider_link_capacity: bool = None
self.linkBwCap: List[List[int]] = None
self.linkBwRemaining: List[List[int]] = None
self.linkLatency: List[List[int]] = None
self.latencyProvider: Dict[int, Iterable[int]] = None
self.end_to_end_latency: List[List[int]] = None
self.end_to_end_bw: List[List[int]] = None
self.end_to_end_next_hop: List[List[int]] = None
self.allSFFs: Dict[int, 'SFF'] = dict()
@staticmethod
@Sim.register_simulation_start_hook
def init_data_structure(sim: Sim):
# make sure that we reset the data structure only if we are already at the beginning of the simulation,
# or if somebody calls this function explicitly (before starting the simulation)
sff_props = sim.props.sff
if not sim.run or sff_props.linkLatency is None:
di = len(sff_props.allSFFs)
sff_props.linkLatency = [[0 for _ in range(di)] for _ in range(di)]
sff_props.linkBwRemaining = [[0 for _ in range(di)] for _ in range(di)]
sff_props.linkBwCap = [[0 for _ in range(di)] for _ in range(di)]
sff_props.end_to_end_latency = [[0 for _ in range(di)] for _ in range(di)]
sff_props.end_to_end_bw = [[0 for _ in range(di)] for _ in range(di)]
sff_props.end_to_end_next_hop = [[0 for _ in range(di)] for _ in range(di)]
@staticmethod
@Sim.register_stop_sim_catch_all_packets_hooks
def callback_get_all_packets_from_sff(sim: Sim):
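# At simulation stop, drain every packet that is still waiting in an SFF
# queue, add the time it has spent there to its scheduling-queue time, and
# yield it so the catch-all statistics can account for it.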
for sff in sim.props.sff.allSFFs.values():
if sff.scheduler.requires_queues_per_class():
for q in sff.packet_queue_per_class:
for p in sff.packet_queue_per_class[q]:
p.timeQueueScheduling += p.get_delta_of_time_mark()
yield p
else:
for p in sff.packet_queue:
p.timeQueueScheduling += p.get_delta_of_time_mark()
yield p
def __repr__(self):
return f"SFF({str(self.id)}/{self.get_number_of_queued_packets()})"
def __init__(self, sim: Sim, scheduler):
if sim.props.sff.linkLatency is not None:
raise NameError(
"Cannot create new SFF after initializing the data structures. " +
"You have to create all SFFs, and then setup the connections.")
self.id = len(sim.props.sff.allSFFs)
sim.props.sff.allSFFs[self.id] = self
self.sim = sim
self.scheduler = scheduler
self.SFIsPerType: Dict[int, List['SFI']] = dict()
self.servers = set()
self.service_rate_per_sf: Dict[int, float] = {}
# this is either a single queue, or a dictionary of queues
if self.scheduler.requires_queues_per_class():
self.packet_queue_per_class: Dict[int, deque] = dict()
else:
self.packet_queue: deque = deque()
scheduler.assign_sff(self)
# holds all queued events when the outgoing link does not provide
# enough capacity
self.outQueue = dict()
@staticmethod
def init_end_to_end_paths(sim: Sim):
di = len(sim.props.sff.allSFFs)
# initialize with direct edges
for s in range(di):
for d in range(di):
assert (sim.props.sff.end_to_end_bw[s][d] == 0)
# draw randomly 500 latencies and calculate the mean
expected_latencies = [mean([next(sim.props.sff.latencyProvider[dist]) for _ in range(500)])
for dist in sim.props.sff.latencyProvider]
for s in range(di):
for d in range(di):
if sim.props.sff.linkBwCap[s][d] > 0:
sim.props.sff.end_to_end_bw[s][d] = sim.props.sff.linkBwCap[s][d]
sim.props.sff.end_to_end_latency[s][d] = expected_latencies[sim.props.sff.linkLatency[s][d]]
sim.props.sff.end_to_end_next_hop[s][d] = d
# this is simply floyd warshall
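# All-pairs shortest paths on expected latency: for every pair (s, d), test
# whether routing via the intermediate SFF "via" gives a lower expected
# latency; if so, store the new latency, the first hop towards "via", and
# the bottleneck bandwidth (the minimum of the two legs).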
for via in range(di):
for s in range(di):
for d in range(di):
if s == d:
continue
if s == via or d == via:
continue
# check if s->d using "via" is a valid connection
if sim.props.sff.end_to_end_bw[s][via] > 0 and sim.props.sff.end_to_end_bw[via][d] > 0:
# faster?
new_latency = sim.props.sff.end_to_end_latency[s][via] + sim.props.sff.end_to_end_latency[via][
d]
if sim.props.sff.end_to_end_bw[s][d] == 0 or new_latency < sim.props.sff.end_to_end_latency[s][
d]:
# either new latency is faster, or there was no connection before
sim.props.sff.end_to_end_latency[s][d] = new_latency
sim.props.sff.end_to_end_next_hop[s][d] = sim.props.sff.end_to_end_next_hop[s][via]
sim.props.sff.end_to_end_bw[s][d] = min(sim.props.sff.end_to_end_bw[s][via],
sim.props.sff.end_to_end_bw[via][d])
def get_number_of_queued_packets(self):
if self.scheduler.requires_queues_per_class():
return sum(map(len, self.packet_queue_per_class.values()))
else:
return len(self.packet_queue)
def route_packet_to_sfi(self, packet: 'Packet', sfi: 'SFI'):
if self.sim.DEBUG:
print("route packet to sfi " + str(sfi))
packet.mark_time()
sfi_props: 'SFI.Props' = self.sim.props.sfi
sff_props: SFF.Props = self.sim.props.sff
delay = next(sff_props.latencyProvider[sfi_props.latency_provider])
self.sim.schedule_event(NetworkDelayEvent(delay=delay,
inner_packet=packet,
source=self,
dest_id=sfi.id,
source_is_sff=True,
dest_is_sff=False))
def free_bw_resource_to_dest_id(self, dest_id, packet: Packet):
assert self.sim.props.sff.consider_link_capacity
self.sim.props.sff.linkBwRemaining[self.id][dest_id] += packet.transmission_size
# sends a packet to a SFF
def route_packet_to_sff_id(self, packet: Packet, dest_id):
if self.sim.DEBUG:
print("route packet to sff" + str(dest_id))
# mark current time for statistics
packet.mark_time()
sff_props: SFF.Props = self.sim.props.sff
if sff_props.linkBwCap[self.id][dest_id] < packet.transmission_size:
print("end 2 end links says capacity:{0} using next hop {1}".
format(sff_props.end_to_end_bw[self.id][dest_id], sff_props.end_to_end_next_hop[self.id][dest_id]))
raise NameError(f"cannot route this packet {self.id}->{dest_id}, because bw capacity " +
f"({sff_props.linkBwCap[self.id][dest_id]}) is not sufficient for packet " +
f"of size ({packet.transmission_size})!")
if sff_props.consider_link_capacity:
if not (dest_id in self.outQueue):
self.outQueue[dest_id] = []
if (len(self.outQueue[dest_id]) == 0 and packet.transmission_size <=
sff_props.linkBwRemaining[self.id][dest_id]):
self.put_packet_on_wire(packet=packet, dest_id=dest_id)
else:
# enqueue packet
self.outQueue[dest_id].append(packet)
else:
self.put_packet_on_wire(packet=packet, dest_id=dest_id)
# internal method, don't call directly
# this method does not check whether packets are waiting in the out queue
# use route_packet_to_sff_id() for sending packets to an SFF
def put_packet_on_wire(self, packet: Packet, dest_id):
packet.timeQueueNetwork += packet.get_delta_of_time_mark()
packet.mark_time()
sff_props: SFF.Props = self.sim.props.sff
# enough bw so that we can send the packet immediately
delay = next(sff_props.latencyProvider[sff_props.linkLatency[self.id][dest_id]])
self.sim.schedule_event(NetworkDelayEvent(delay=delay,
inner_packet=packet, source=self, dest_id=dest_id))
if sff_props.consider_link_capacity:
sff_props.linkBwRemaining[self.id][dest_id] -= packet.transmission_size
def route_packet_to_next_hop(self, packet: Packet):
next_hop_type, next_hop = packet.fullPath[packet.pathPosition]
packet.pathPosition += 1
if self.sim.PACKET_ID_TO_DEBUG == packet.id:
print(f'** packet at sff {self.id}, route to next hop')
if next_hop_type == 'SFF':
if self.sim.DEBUG:
print(". packet goes to another SFF")
# we send this packet to the other SFF
if packet.id == self.sim.PACKET_ID_TO_DEBUG:
print("** debug packet route to {0}".format(str(next_hop)))
self.route_packet_to_sff_id(packet, next_hop.id)
elif next_hop_type == 'SFI':
if self.sim.DEBUG:
print(". packet goes to a SFI")
# we send this packet to the SFI
self.route_packet_to_sfi(packet, next_hop)
else:
NameError(f"unknown next hop: {next_hop_type}")
def check_and_update_packet_and_return_if_process_locally(self, packet: Packet, source: str) -> bool:
if self.sim.TRACE_PACKET_PATH:
packet.visitedHops.append(self)
if self.sim.DEBUG:
print(f'at SFF {self.id}, receive a packet from {source}; '
f'packet: id:{packet.id} sfc:{packet.flow.sfTypeChain} '
f'remaining path:{packet.fullPath[packet.pathPosition:]}')
if packet.id == self.sim.PACKET_ID_TO_DEBUG:
print(
f"** @{self.sim.currentTime} debug packet at {str(self)}, path={packet.fullPath}, coming from {source}")
if packet.flow.qosMaxDelay < (self.sim.currentTime - packet.time_ingress):
if self.sim.DEBUG:
print(". drop packet because of timeout")
# print(". drop packet because of timeout " + source)
packet.drop_timed_out(self)
return False
if packet.processing_done:
# do we have to add the path to the egress?
if len(packet.toBeVisited) == 0 and len(
packet.fullPath) == packet.pathPosition and self.id != packet.flow.desiredEgressSSFid:
# add the path to the egress
path_to_dest = self.get_multi_hop_path_for(self.sim, self.id, packet.flow.desiredEgressSSFid)
for sff_id in path_to_dest:
packet.fullPath.append((SFF.__name__, self.sim.props.sff.allSFFs[sff_id]))
# we reached already the egress?
if len(packet.fullPath) == packet.pathPosition:
if self.sim.DEBUG:
print(". packet reached egress")
# we should be at the egress
assert (self.id == packet.flow.desiredEgressSSFid)
# there should be no remaining sf type
assert (len(packet.toBeVisited) == 0)
packet.done()
return False
else:
self.route_packet_to_next_hop(packet)
return False
elif len(packet.fullPath) > packet.pathPosition:
self.route_packet_to_next_hop(packet)
return False
return True
def handle_packet_from_ingress(self, packet: Packet):
if self.check_and_update_packet_and_return_if_process_locally(packet, f'ingress'):
self.apply_logic_packet_from_ingress(packet)
def handle_packet_from_other_sff(self, packet: Packet, other_sff: 'SFF'):
if self.check_and_update_packet_and_return_if_process_locally(packet, f'a sff {other_sff.id}'):
self.apply_logic_packet_from_other_sff(packet, other_sff)
def handle_packet_from_sfi(self, packet: Packet, sfi: 'SFI'):
if self.check_and_update_packet_and_return_if_process_locally(packet, f'a sfi {sfi.id}'):
self.apply_logic_packet_from_sfi(packet, sfi)
def handle_packet_from_scheduler(self, packet: Packet):
if self.sim.PACKET_ID_TO_DEBUG == packet.id:
print(f'** receive packet from scheduler..')
Packet.debug_print_path(packet.fullPath, packet.pathPosition)
if self.check_and_update_packet_and_return_if_process_locally(packet, 'scheduler'):
Packet.debug_packet(packet)
raise NameError(f'scheduler returns a packet {packet.id}, but sff does not know how to proceed with this '
f'packet')
def apply_logic_packet_from_other_sff(self, packet, other_sff: 'SFF'):
self.put_packet_in_queue(packet)
self.inform_scheduler_about_packet(packet)
def apply_logic_packet_from_ingress(self, packet):
self.put_packet_in_queue(packet)
self.inform_scheduler_about_packet(packet)
def apply_logic_packet_from_sfi(self, packet, sfi: 'SFI'):
self.put_packet_in_queue(packet)
self.inform_scheduler_about_packet(packet)
def inform_scheduler_about_packet(self, packet):
if self.sim.DEBUG or packet.id == self.sim.PACKET_ID_TO_DEBUG:
print(f'** send packet ({packet.id}) to scheduler')
packet.seenByScheduler += 1
try:
self.scheduler.handle_packet_arrival(packet=packet)
except SchedulingFailure as e:
if self.sim.STOP_SIMULATION_IF_SCHEDULER_WAS_UNSUCCESSFUL:
raise e
elif self.sim.DEBUG:
print(
". scheduler was unsuccessful to schedule a packet: " +
str(e))
def put_packet_in_queue(self, packet):
if self.scheduler.requires_queues_per_class():
queue = Flow.get_packet_class_of_packet(packet)
if queue not in self.packet_queue_per_class:
self.packet_queue_per_class[queue] = deque()
if self.sim.DEBUG:
print(f'. add packet of sfc {packet.flow.sfc_identifier} '
f'at position {packet.sfc_position} to queue {queue}')
self.packet_queue_per_class[queue].append(packet)
else:
self.packet_queue.append(packet)
packet.mark_time() # mark time when this packet was queued to the scheduler
def register_sfi(self, sfi: 'SFI'):
if not (sfi.of_type in self.SFIsPerType):
self.SFIsPerType[sfi.of_type] = []
# check here that we don't get multiple SFIs of the same type for a
# single server
for otherSFI in self.SFIsPerType[sfi.of_type]:
if otherSFI.server == sfi.server:
raise NameError(
"this configuration is not allowed! A SFF should" +
" see for a single server only one SFI instance per type!")
self.SFIsPerType[sfi.of_type].append(sfi)
self.servers.add(sfi.server)
if sfi.of_type not in self.service_rate_per_sf:
self.service_rate_per_sf[sfi.of_type] = 0
self.service_rate_per_sf[sfi.of_type] += sfi.get_expected_processing_rate()
def sfi_finishes_processing_of_packet(self, sfi: 'SFI', packet: Packet):
# the given SFI is finished with processing, so depending on the server cpu sharing policy,
# this server might be free now
self.scheduler.notify_sfi_finished_processing_of_packet(sfi, packet)
@staticmethod
| |
#!/usr/bin/env python
# Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2016 ZAP Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script runs a baseline scan against a target URL using ZAP
#
# It can either be run 'standalone', in which case it depends on
# https://pypi.python.org/pypi/python-owasp-zap-v2.4 and Docker, or it can be run
# inside one of the ZAP docker containers. It automatically detects if it is
# running in docker so the parameters are the same.
#
# By default it will spider the target URL for one minute, but you can change
# that via the -m parameter.
# It will then wait for the passive scanning to finish - how long that takes
# depends on the number of pages found.
# It will exit with codes of:
# 0: Success
# 1: At least 1 FAIL
# 2: At least one WARN and no FAILs
# 3: Any other failure
# By default all alerts found by ZAP will be treated as WARNings.
# You can use the -c or -u parameters to specify a configuration file to override
# this.
# You can generate a template configuration file using the -g parameter. You will
# then need to change 'WARN' to 'FAIL', 'INFO' or 'IGNORE' for the rules you want
# to be handled differently.
# You can also add your own messages for the rules by appending them after a tab
# at the end of each line.
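#
# A sketch of the expected config file line format (inferred from load_config()
# below); fields are tab separated and at least three fields are required:
#
#   <plugin_id><TAB>IGNORE|INFO|WARN|FAIL<TAB><rule name><TAB><optional user message>
#   OUTOFSCOPE<TAB><comma separated plugin_ids><TAB><regex of urls to ignore>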
import getopt
import json
import logging
import os
import os.path
import re
import socket
import subprocess
import sys
import time
import traceback
import urllib2
from datetime import datetime
from random import randint
from zapv2 import ZAPv2
timeout = 120
config_dict = {}
config_msg = {}
out_of_scope_dict = {}
running_in_docker = os.path.exists('/.dockerenv')
levels = ["PASS", "IGNORE", "INFO", "WARN", "FAIL"]
min_level = 0
# Pscan rules that aren't really relevant, e.g. the example rules in the alpha set
blacklist = ['-1', '50003', '60000', '60001']
# Pscan rules that are being addressed
in_progress_issues = {}
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
def usage():
print ('Usage: zap-baseline.py -t <target> [options]')
print (' -t target target URL including the protocol, eg https://www.example.com')
print ('Options:')
print (' -c config_file config file to use to INFO, IGNORE or FAIL warnings')
print (' -u config_url URL of config file to use to INFO, IGNORE or FAIL warnings')
print (' -g gen_file generate default config file (all rules set to WARN)')
print (' -m mins the number of minutes to spider for (default 1)')
print (' -r report_html file to write the full ZAP HTML report')
print (' -w report_md file to write the full ZAP Wiki (Markdown) report')
print (' -x report_xml file to write the full ZAP XML report')
print (' -a include the alpha passive scan rules as well')
print (' -d show debug messages')
print (' -P specify listen port')
print (' -D delay in seconds to wait for passive scanning ')
print (' -i default rules not in the config file to INFO')
print (' -j use the Ajax spider in addition to the traditional one')
print (' -l level minimum level to show: PASS, IGNORE, INFO, WARN or FAIL, use with -s to hide example URLs')
print (' -n context_file context file which will be loaded prior to spidering the target')
print (' -p progress_file progress file which specifies issues that are being addressed')
print (' -s short output format - dont show PASSes or example URLs')
print (' -z zap_options ZAP command line options e.g. -z "-config aaa=bbb -config ccc=ddd"')
print ('')
print ('For more details see https://github.com/zaproxy/zaproxy/wiki/ZAP-Baseline-Scan')
def load_config(config):
for line in config:
if not line.startswith('#') and len(line) > 1:
(key, val, optional) = line.rstrip().split('\t', 2)
if key == 'OUTOFSCOPE':
for plugin_id in val.split(','):
if not plugin_id in out_of_scope_dict:
out_of_scope_dict[plugin_id] = []
out_of_scope_dict[plugin_id].append(re.compile(optional))
else:
config_dict[key] = val
if '\t' in optional:
(ignore, usermsg) = optional.rstrip().split('\t')
config_msg[key] = usermsg
else:
config_msg[key] = ''
def is_in_scope(plugin_id, url):
if '*' in out_of_scope_dict:
for oos_prog in out_of_scope_dict['*']:
#print('OOS Compare ' + oos_url + ' vs ' + 'url)
if oos_prog.match(url):
#print('OOS Ignoring ' + str(plugin_id) + ' ' + url)
return False
#print 'Not in * dict'
if plugin_id in out_of_scope_dict:
for oos_prog in out_of_scope_dict[plugin_id]:
#print('OOS Compare ' + oos_url + ' vs ' + 'url)
if oos_prog.match(url):
#print('OOS Ignoring ' + str(plugin_id) + ' ' + url)
return False
#print 'Not in ' + plugin_id + ' dict'
return True
def print_rule(action, alert_list, detailed_output, user_msg):
if min_level > levels.index(action):
return;
id = alert_list[0].get('pluginId')
if id in in_progress_issues:
print (action + '-IN_PROGRESS: ' + alert_list[0].get('alert') + ' [' + id + '] x ' + str(len(alert_list)) + ' ' + user_msg)
if in_progress_issues[id]["link"]:
print ('\tProgress link: ' + in_progress_issues[id]["link"])
else:
print (action + '-NEW: ' + alert_list[0].get('alert') + ' [' + id + '] x ' + str(len(alert_list)) + ' ' + user_msg)
if detailed_output:
# Show (up to) first 5 urls
for alert in alert_list[0:5]:
print ('\t' + alert.get('url'))
def dump_log_file(cid):
traceback.print_exc()
# Unexpected issue - dump the zap.log file
if running_in_docker:
zap_log = '/zap/zap.out'
if os.path.isfile(zap_log):
with open(zap_log, 'r') as zlog:
for line in zlog:
sys.stderr.write(line)
else:
logging.debug ('Failed to find zap_log ' + zap_log)
else:
logging.debug ('Dumping docker logs')
subprocess.call(["docker", "logs", cid], stdout = sys.stderr)
def main(argv):
global min_level
global in_progress_issues
cid = ''
context_file = ''
progress_file = ''
config_file = ''
config_url = ''
generate = ''
mins = 1
port = 0
detailed_output = True
report_html = ''
report_md = ''
report_xml = ''
target = ''
zap_alpha = False
info_unspecified = False
ajax = False
base_dir = ''
zap_ip = 'localhost'
zap_options = ''
delay = 0
pass_count = 0
warn_count = 0
fail_count = 0
info_count = 0
ignore_count = 0
warn_inprog_count = 0
fail_inprog_count = 0
try:
opts, args = getopt.getopt(argv,"t:c:u:g:m:n:r:w:x:l:daijp:sz:P:D:")
except getopt.GetoptError, exc:
logging.warning ('Invalid option ' + exc.opt + ' : ' + exc.msg)
usage()
sys.exit(3)
for opt, arg in opts:
if opt == '-t':
target = arg
logging.debug ('Target: ' + target)
elif opt == '-c':
config_file = arg
elif opt == '-u':
config_url = arg
elif opt == '-g':
generate = arg
elif opt == '-d':
logging.getLogger().setLevel(logging.DEBUG)
elif opt == '-m':
mins = int(arg)
elif opt == '-P':
port = int(arg)
elif opt == '-D':
delay = int(arg)
elif opt == '-n':
context_file = arg
elif opt == '-p':
progress_file = arg
elif opt == '-r':
report_html = arg
elif opt == '-w':
report_md = arg
elif opt == '-x':
report_xml = arg
elif opt == '-a':
zap_alpha = True
elif opt == '-i':
info_unspecified = True
elif opt == '-j':
ajax = True
elif opt == '-l':
try:
min_level = levels.index(arg)
except ValueError:
logging.warning ('Level must be one of ' + str(levels))
usage()
sys.exit(3)
elif opt == '-z':
zap_options = arg
elif opt == '-s':
detailed_output = False
# Check target supplied and ok
if len(target) == 0:
usage()
sys.exit(3)
if not (target.startswith('http://') or target.startswith('https://')):
logging.warning ('Target must start with \'http://\' or \'https://\'')
usage()
sys.exit(3)
if running_in_docker:
base_dir = '/zap/wrk/'
if len(config_file) > 0 or len(generate) > 0 or len(report_html) > 0 or len(report_xml) > 0 or len(progress_file) > 0 or len(context_file) > 0:
# Check directory has been mounted
if not os.path.exists(base_dir):
logging.warning ('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
usage()
sys.exit(3)
# Choose a random 'ephemeral' port and check its available if it wasn't specified with -P option
if port == 0:
while True:
port = randint(32768, 61000)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not (sock.connect_ex(('127.0.0.1', port)) | |
d_model, pdrop, scale=scale, d_k=d_k)
self.ffn = FFN(d_model, activation_type, d_ff, ffn_pdrop, name="ffn")
self.ln1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.ln2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.dropout = tf.keras.layers.Dropout(pdrop)
def call(self, inputs):
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
x, mask = inputs
if not self.layer_norms_after:
x = self.ln1(x)
h = self.self_attn((x, x, x, mask))
x = x + self.dropout(h, TRAIN_FLAG())
x = self.ln2(x)
x = x + self.dropout(self.ffn(x), TRAIN_FLAG())
if self.layer_norms_after:
x = self.ln1(x)
return x
class SpatialGatingUnit(tf.keras.layers.Layer):
"""Spatial gating unit
There are 2 ways we can look at this unit, as an MLP or a Conv with kernel length 1
l = nn.Linear(T, T)
c = nn.Conv1d(T, T, 1)
l(x.transpose(1, 2)).transpose(1, 2)
c(x)
"""
def __init__(self,
d_ffn: int,
nctx: int,
layer_norm_eps: float = 1.0e-6,
name: Optional[str] = None,
):
super().__init__(name=name)
self.norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.proj = tf.keras.layers.Conv1D(filters=nctx, kernel_size=1, data_format="channels_first", bias_initializer='ones')
def call(self, x):
u, v = tf.split(x, 2, axis=-1)
# "channel" layer norm
v = self.norm(v)
v = self.proj(v)
return u * v
class GatedMLPEncoder(tf.keras.layers.Layer):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
nctx: int = 256,
activation_type: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
name: Optional[str] = None,
):
super().__init__(name=name)
# to properly execute BERT models, we have to follow T2T and do layer norms after
self.d_model = d_model
self.d_ff = d_ff if d_ff is not None else 4 * d_model
self.to_ffn = tf.keras.layers.Dense(self.d_ff)
self.activation = get_activation(activation_type)
self.ffn_drop = tf.keras.layers.Dropout(ffn_pdrop)
self.from_sgu = tf.keras.layers.Dense(self.d_model)
self.norm = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.dropout = tf.keras.layers.Dropout(pdrop)
self.spatial_gating_unit = SpatialGatingUnit(self.d_ff, nctx, layer_norm_eps)
def call(self, inputs):
"""
:param inputs: `(x, mask)`
:return: The output tensor
"""
# The shortcut here happens pretty early
shortcut, mask = inputs
# A "channel" norm
x = self.norm(shortcut)
# A "channel" FFN
x = self.dropout(self.to_ffn(x))
# gelu according to https://arxiv.org/pdf/2105.08050.pdf
x = self.activation(x)
# "spatial" projection (over T)
x = self.spatial_gating_unit(x)
# "channel" projection
x = self.from_sgu(x)
x = self.dropout(x)
return x + shortcut
class TransformerDecoder(tf.keras.layers.Layer):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
activation_type: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
name: str = None
):
super().__init__(name=name)
self.d_model = d_model
self.layer_norms_after = layer_norms_after
self.d_ff = d_ff if d_ff is not None else 4 * d_model
if rpr_k is not None:
self.self_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, name="self_attention")
self.src_attn = MultiHeadedRelativeAttention(num_heads, d_model, rpr_k, pdrop, scale, d_k=d_k, name="src_attention")
else:
self.self_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, name="self_attention")
self.src_attn = MultiHeadedAttention(num_heads, d_model, pdrop, scale, d_k=d_k, name="src_attention")
self.ffn = FFN(d_model, activation_type, d_ff, pdrop=ffn_pdrop, name="ffn")
self.ln1 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.ln2 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.ln3 = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.dropout = tf.keras.layers.Dropout(pdrop)
def call(self, inputs):
x, memory, src_mask, tgt_mask = inputs
if not self.layer_norms_after:
x = self.ln1(x)
x = x + self.dropout(self.self_attn((x, x, x, tgt_mask)), TRAIN_FLAG())
x = self.ln2(x)
x = x + self.dropout(self.src_attn((x, memory, memory, src_mask)), TRAIN_FLAG())
x = self.ln3(x)
x = x + self.dropout(self.ffn(x), TRAIN_FLAG())
if self.layer_norms_after:
x = self.ln1(x)
return x
class GatedMLPEncoderStack(tf.keras.layers.Layer):
"""Following https://arxiv.org/pdf/2105.08050.pdf
"""
def __init__(
self,
d_model: int,
pdrop: float,
layers: int = 1,
nctx: int = 256,
activation: str = "gelu",
d_ff: Optional[int] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norm_eps: float = 1.0e-6,
layer_drop: float = 0.0,
name: Optional[str] = None,
**kwargs,
):
super().__init__(name=name)
self.encoders = []
self.ln = tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.output_dim = d_model
self.layer_drop = layer_drop
for i in range(layers):
self.encoders.append(
GatedMLPEncoder(
d_model, pdrop, nctx, activation, d_ff,
ffn_pdrop=ffn_pdrop,
layer_norm_eps=layer_norm_eps,
)
)
def call(self, inputs):
x, mask = inputs
for layer in self.encoders:
x = layer((x, mask))
return self.ln(x)
class TransformerEncoderStack(tf.keras.layers.Layer):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: bool = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
name: Optional[str] = None,
**kwargs,
):
super().__init__(name=name)
self.encoders = []
self.ln = tf.identity if layer_norms_after else tf.keras.layers.LayerNormalization(epsilon=layer_norm_eps)
self.layer_drop = layer_drop
if not is_sequence(rpr_k):
rpr_k = [rpr_k] * layers
for i in range(layers):
self.encoders.append(
TransformerEncoder(
num_heads, d_model, pdrop, scale, activation, d_ff, d_k,
rpr_k=rpr_k[i], ffn_pdrop=ffn_pdrop,
layer_norms_after=layer_norms_after, layer_norm_eps=layer_norm_eps, windowed_ra=windowed_ra,
rpr_value_on=rpr_value_on, name=name,
)
)
def call(self, inputs):
x, mask = inputs
for layer in self.encoders:
## TODO: FIXME I dont work on TPUs!!!
##pdrop = tf.random.uniform([])
##if not TRAIN_FLAG() or (pdrop >= self.layer_drop):
x = layer((x, mask))
return self.ln(x)
class TransformerEncoderStackWithLengths(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
name: str = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, name, **kwargs)
self.proj = WithDropout(tf.keras.layers.Dense(d_model), pdrop)
def call(self, inputs):
x, lengths = inputs
x = self.proj(x)
max_seqlen = get_shape_as_list(x)[1]
mask = tf.expand_dims(tf.expand_dims(tf.sequence_mask(lengths, max_seqlen, dtype=tf.float32), 1), 1)
return super().call((x, mask))
class TransformerEncoderStackWithTimeMask(TransformerEncoderStack):
def __init__(
self,
num_heads: int,
d_model: int,
pdrop: float,
scale: bool = True,
layers: int = 1,
activation: str = "relu",
d_ff: Optional[int] = None,
d_k: Optional[int] = None,
rpr_k: Optional[Union[int, List[int]]] = None,
ffn_pdrop: Optional[float] = 0.0,
layer_norms_after: bool = False,
layer_norm_eps: float = 1.0e-6,
windowed_ra: Optional[bool] = False,
rpr_value_on: bool = True,
layer_drop: float = 0.0,
name: str = None,
**kwargs,
):
super().__init__(num_heads, d_model, pdrop, scale, layers, activation, d_ff, d_k, rpr_k,
ffn_pdrop, layer_norms_after, layer_norm_eps, windowed_ra, rpr_value_on, layer_drop, name,
**kwargs)
self.proj = WithDropout(tf.keras.layers.Dense(d_model), pdrop)
def call(self, inputs):
x, _ = inputs
x = self.proj(x)
max_seqlen = get_shape_as_list(x)[1]
mask = subsequent_mask(max_seqlen)
return super().call((x, mask))
class AttentionReduction(tf.keras.layers.Layer):
"""
This is a reduction that is given Q, K, V and a mask vector, unlike the base reductions, which are given an embedding stack
"""
def __init__(self, name=None):
super().__init__(name=name)
def call(self, qkvm: Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]) -> tf.Tensor:
"""Inputs are the same as for a normal attention function, but the output here is a single tensor, ``[B, H]``
:param query: a query for alignment. Can come from self in case of self-attn or decoder in case of E/D
:param key: a set of keys from encoder or self
:param value: a set of values from encoder or self
:param mask: masking (for destination) to prevent seeing what we shouldn't
:return: sentence-level encoding with dim [B, d_model]
"""
class SingleHeadReduction(AttentionReduction):
"""
Implementation of the "self_attention_head" layer from the conveRT paper (https://arxiv.org/pdf/1911.03688.pdf)
"""
def __init__(
self, d_model: int, dropout: float = 0.0, scale: bool = False, d_k: Optional[int] = None, pooling: str = 'sqrt_length', name: Optional[str] = None,
):
"""
:param d_model: The model hidden size
:param dropout (``float``): The amount of dropout to use
:param scale: should we scale the dot product attention
:param d_k: The low-order project per head. This is normally `d_model // num_heads` unless set explicitly
"""
super().__init__(name=name)
self.output_dim = d_model
if d_k is None:
self.d_k = d_model
else:
self.d_k = d_k
self.w_Q = tf.keras.layers.Dense(self.d_k, name="query_projection")
self.w_K = tf.keras.layers.Dense(self.d_k, name="key_projection")
if scale:
self.attn_fn = SeqScaledDotProductAttention(dropout)
else:
self.attn_fn = SeqDotProductAttention(dropout)
self.attn = None
pooling = pooling.lower()
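# choose how the per-timestep attention outputs are pooled into a single
# sentence-level vector: 'max', 'mean', or the default conveRT-style sum
# scaled by the square root of the sequence length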
self.fill = 0
if pooling == 'max':
self.pool = self._max_pool
self.fill = -1e9
elif pooling == 'mean':
self.pool = self._mean_pool
else:
self.pool = self._sqrt_length_pool
def _sqrt_length_pool(self, x, seq_lengths):
x = tf.reduce_sum(x, axis=1) # [B, D]
x = x * tf.expand_dims(tf.sqrt(tf.cast(seq_lengths, tf.float32)), -1)
return x
def _mean_pool(self, x, seq_lengths):
return tf.reduce_sum(x, 1) / tf.expand_dims(seq_lengths, -1)
def _max_pool(self, x, _):
x = tf.reduce_max(x, 1)
return x
def call(self, qkvm: Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]) -> tf.Tensor:
query, key, value, mask = qkvm
batchsz = get_shape_as_list(query)[0]
# (B, T, H, D) -> (B, H, T, D)
query = tf.transpose(tf.reshape(self.w_Q(query), [batchsz, -1, 1, self.d_k]), | |
0.25)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DayConvection"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DayConvection"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_DayConvectionRGB_final.pdf"
ds["DayConvection"].attrs["long_name"] = "Day Convection"
return ds["DayConvection"]
def DayCloudConvection(self):
"""
Day Cloud Convection RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_DayCloudConvectionRGB_final.pdf>`__ for reference)
.. image:: /_static/DayCloudConvection.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R, G, B = self._load_RGB_channels((2, 2, 13))
# _normalize each channel by the appropriate range of values.
R = _normalize(R, 0, 1)
G = _normalize(G, 0, 1)
B = _normalize(B, -70.15, 49.85)
# Invert B
B = 1 - B
# Apply the gamma correction to the Red and Green channels.
# corrected_value = value^(1/gamma)
gamma = 1.7
R = _gamma_correction(R, gamma)
G = _gamma_correction(G, gamma)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DayCloudConvection"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DayCloudConvection"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_DayCloudConvectionRGB_final.pdf"
ds["DayCloudConvection"].attrs["long_name"] = "Day Cloud Convection"
return ds["DayCloudConvection"]
def DayLandCloud(self):
"""
Day Land Cloud RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_daylandcloudRGB_final.pdf>`__ for reference)
.. image:: /_static/DayLandCloud.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R, G, B = self._load_RGB_channels((5, 3, 2))
# _normalize each channel by the appropriate range of values e.g. R = (R-minimum)/(maximum-minimum)
R = _normalize(R, 0, 0.975)
G = _normalize(G, 0, 1.086)
B = _normalize(B, 0, 1)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DayLandCloud"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DayLandCloud"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_daylandcloudRGB_final.pdf"
ds["DayLandCloud"].attrs["long_name"] = "Day Land Cloud"
return ds["DayLandCloud"]
def DayLandCloudFire(self):
"""
Day Land Cloud Fire RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_DayLandCloudFireRGB_final.pdf>`__ for reference)
.. image:: /_static/DayLandCloudFire.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R, G, B = self._load_RGB_channels((6, 3, 2))
# _normalize each channel by the appropriate range of values e.g. R = (R-minimum)/(maximum-minimum)
R = _normalize(R, 0, 1)
G = _normalize(G, 0, 1)
B = _normalize(B, 0, 1)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DayLandCloudFire"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DayLandCloudFire"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_DayLandCloudFireRGB_final.pdf"
ds["DayLandCloudFire"].attrs["long_name"] = "Day Land Cloud Fire"
return ds["DayLandCloud"]
def WaterVapor(self):
"""
Simple Water Vapor RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/Simple_Water_Vapor_RGB.pdf>`__ for reference)
.. image:: /_static/WaterVapor.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables.
R, G, B = self._load_RGB_channels((13, 8, 10))
# _normalize each channel by the appropriate range of values. e.g. R = (R-minimum)/(maximum-minimum)
R = _normalize(R, -70.86, 5.81)
G = _normalize(G, -58.49, -30.48)
B = _normalize(B, -28.03, -12.12)
# Invert the colors
R = 1 - R
G = 1 - G
B = 1 - B
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["WaterVapor"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["WaterVapor"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/Simple_Water_Vapor_RGB.pdf"
ds["WaterVapor"].attrs["long_name"] = "Water Vapor"
return ds["WaterVapor"]
def DifferentialWaterVapor(self):
"""
Differential Water Vapor RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_DifferentialWaterVaporRGB_final.pdf>`__ for reference)
.. image:: /_static/DifferentialWaterVapor.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables.
R = ds["CMI_C10"].data - ds["CMI_C08"].data
G = ds["CMI_C10"].data - 273.15
B = ds["CMI_C08"].data - 273.15
# _normalize each channel by the appropriate range of values. e.g. R = (R-minimum)/(maximum-minimum)
R = _normalize(R, -3, 30)
G = _normalize(G, -60, 5)
B = _normalize(B, -64.65, -29.25)
# Gamma correction
R = _gamma_correction(R, 0.2587)
G = _gamma_correction(G, 0.4)
B = _gamma_correction(B, 0.4)
# Invert the colors
R = 1 - R
G = 1 - G
B = 1 - B
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DifferentialWaterVapor"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DifferentialWaterVapor"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_DifferentialWaterVaporRGB_final.pdf"
ds["DifferentialWaterVapor"].attrs["long_name"] = "Differential Water Vapor"
return ds["DifferentialWaterVapor"]
def DaySnowFog(self):
"""
Day Snow-Fog RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_DaySnowFog.pdf>`__ for reference)
.. image:: /_static/DaySnowFog.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R = ds["CMI_C03"].data
G = ds["CMI_C05"].data
B = ds["CMI_C07"].data - ds["CMI_C13"].data
# _normalize values
R = _normalize(R, 0, 1)
G = _normalize(G, 0, 0.7)
B = _normalize(B, 0, 30)
# Apply a gamma correction to the image
gamma = 1.7
R = _gamma_correction(R, gamma)
G = _gamma_correction(G, gamma)
B = _gamma_correction(B, gamma)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["DaySnowFog"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["DaySnowFog"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_DaySnowFog.pdf"
ds["DaySnowFog"].attrs["long_name"] = "Day Snow Fog"
return ds["DaySnowFog"]
def NighttimeMicrophysics(self):
"""
Nighttime Microphysics RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_NtMicroRGB_final.pdf>`__ for reference)
.. image:: /_static/NighttimeMicrophysics.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R = ds["CMI_C15"].data - ds["CMI_C13"].data
G = ds["CMI_C13"].data - ds["CMI_C07"].data
B = ds["CMI_C13"].data - 273.15
# _normalize values
R = _normalize(R, -6.7, 2.6)
G = _normalize(G, -3.1, 5.2)
B = _normalize(B, -29.6, 19.5)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["NighttimeMicrophysics"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["NighttimeMicrophysics"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/QuickGuide_GOESR_NtMicroRGB_final.pdf"
ds["NighttimeMicrophysics"].attrs["long_name"] = "Nighttime Microphysics"
return ds["NighttimeMicrophysics"]
def Dust(self):
"""
Dust RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/Dust_RGB_Quick_Guide.pdf>`__ for reference)
.. image:: /_static/Dust.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R = ds["CMI_C15"].data - ds["CMI_C13"].data
G = ds["CMI_C14"].data - ds["CMI_C11"].data
B = ds["CMI_C13"].data - 273.15
# _normalize values
R = _normalize(R, -6.7, 2.6)
G = _normalize(G, -0.5, 20)
B = _normalize(B, -11.95, 15.55)
# Apply a gamma correction to the image
gamma = 2.5
G = _gamma_correction(G, gamma)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["Dust"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["Dust"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/Dust_RGB_Quick_Guide.pdf"
ds["Dust"].attrs["long_name"] = "Dust"
return ds["Dust"]
def SulfurDioxide(self):
"""
SulfurDioxide RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/Quick_Guide_SO2_RGB.pdf>`__ for reference)
.. image:: /_static/SulfurDioxide.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R = ds["CMI_C09"].data - ds["CMI_C10"].data
G = ds["CMI_C13"].data - ds["CMI_C11"].data
B = ds["CMI_C07"].data - 273.15
# _normalize values
R = _normalize(R, -4, 2)
G = _normalize(G, -4, 5)
B = _normalize(B, -30.1, 29.8)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["SulfurDioxide"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["SulfurDioxide"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/Quick_Guide_SO2_RGB.pdf"
ds["SulfurDioxide"].attrs["long_name"] = "<NAME>"
return ds["SulfurDioxide"]
def Ash(self):
"""
Ash RGB:
(See `Quick Guide <http://rammb.cira.colostate.edu/training/visit/quick_guides/GOES_Ash_RGB.pdf>`__ for reference)
.. image:: /_static/Ash.png
"""
ds = self._obj
# Load the three channels into appropriate R, G, and B variables
R = ds["CMI_C15"].data - ds["CMI_C13"].data
G = ds["CMI_C14"].data - ds["CMI_C11"].data
B = ds["CMI_C13"].data - 273.15
# _normalize values
R = _normalize(R, -6.7, 2.6)
G = _normalize(G, -6, 6.3)
B = _normalize(B, -29.55, 29.25)
# The final RGB array :)
RGB = np.dstack([R, G, B])
ds["Ash"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["Ash"].attrs[
"Quick Guide"
] = "http://rammb.cira.colostate.edu/training/visit/quick_guides/GOES_Ash_RGB.pdf"
ds["Ash"].attrs["long_name"] = "Ash"
return ds["Ash"]
def SplitWindowDifference(self):
"""
Split Window Difference RGB (greyscale):
(See `Quick Guide <http://cimss.ssec.wisc.edu/goes/OCLOFactSheetPDFs/ABIQuickGuide_SplitWindowDifference.pdf>`__ for reference)
.. image:: /_static/SplitWindowDifference.png
"""
ds = self._obj
# Compute the split window difference (C15 - C13) used for all three greyscale channels
data = ds["CMI_C15"].data - ds["CMI_C13"].data
# _normalize values
data = _normalize(data, -10, 10)
# The final RGB array :)
RGB = np.dstack([data, data, data])
ds["SplitWindowDifference"] = (("y", "x", "rgb"), RGB)
ds["rgb"] = ["R", "G", "B"]
ds["SplitWindowDifference"].attrs[
"Quick Guide"
] = "http://cimss.ssec.wisc.edu/goes/OCLOFactSheetPDFs/ABIQuickGuide_SplitWindowDifference.pdf"
ds["SplitWindowDifference"].attrs["long_name"] = "Split Window Difference"
return ds["SplitWindowDifference"]
def NightFogDifference(self):
"""
Night Fog Difference RGB (greyscale):
(See `Quick Guide <http://cimss.ssec.wisc.edu/goes/OCLOFactSheetPDFs/ABIQuickGuide_NightFogBTD.pdf>`__ for reference)
.. image:: /_static/NightFogDifference.png
"""
ds = self._obj
# Compute the night fog brightness temperature difference (C13 - C07) used for all three greyscale channels
data = ds["CMI_C13"].data - ds["CMI_C07"].data
# _normalize | |
help='')
with self.argument_context('sites sitesonenotepagesparentnotebooksectionsparentsectiongroupsection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_section_id1', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionpage copy-to-section') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_page_id1', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionpage onenote-patch-content') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_page_id1', type=str, help='key: id of onenotePage')
c.argument('commands', action=AddSitesOnenotePagesParentsectionPagesCommands, nargs='+', help='')
with self.argument_context('sites sitesonenotepagesparentsectionpage preview') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_page_id1', type=str, help='key: id of onenotePage')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebooksectiongroupsparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebooksectiongroupssection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebooksectiongroupssection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebooksection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectionparentnotebooksection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectiongroupparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectiongroupparentnotebooksection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectiongroupparentnotebooksection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectiongroupsection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotepagesparentsectiongroupsection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspage copy-to-section') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspage onenote-patch-content') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('commands', action=AddSitesOnenoteSectiongroupsParentnotebookSectionsPagesCommands, nargs='+',
help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspage preview') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspagesparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspagesparentsection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionspagesparentsection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupsparentnotebooksectionsparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupssection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupssection copy-to-section-group') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupssectionspage copy-to-section') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('id_', options_list=['--id'], type=str, help='')
c.argument('group_id', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupssectionspage onenote-patch-content') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('commands', action=AddSitesOnenoteSectiongroupsSectionsPagesCommands, nargs='+', help='')
with self.argument_context('sites sitesonenotesectiongroupssectionspage preview') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
with self.argument_context('sites sitesonenotesectiongroupssectionspagesparentnotebook copy-notebook') as c:
c.argument('site_id', type=str, help='key: id of site')
c.argument('section_group_id', type=str, help='key: id of sectionGroup')
c.argument('onenote_section_id', type=str, help='key: id of onenoteSection')
c.argument('onenote_page_id', type=str, help='key: id of onenotePage')
c.argument('group_id', type=str, help='')
c.argument('rename_as', type=str, help='')
c.argument('notebook_folder', type=str, help='')
c.argument('site_collection_id', type=str, help='')
c.argument('string_site_id', type=str, help='')
with self.argument_context('sites sitesonenotesectiongroupssectionspagesparentnotebooksection copy-to-notebook') as c:
c.argument('site_id', type=str, help='key: id of | |
# Source: noahwaterfieldprice/alphago -- alphago/alphago.py
from collections import OrderedDict
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from .player import MCTSPlayer, RandomPlayer, OptimalPlayer
from .evaluator import evaluate
from .mcts_tree import MCTSNode, mcts
from .utilities import sample_distribution
__all__ = ["train_alphago", "self_play", "process_self_play_data",
"process_training_data"]
def compute_checkpoint_name(step, path):
return path + "{}.checkpoint".format(step)
def train_alphago(game, create_estimator, self_play_iters, training_iters,
checkpoint_path, summary_path, alphago_steps=100,
evaluate_every=1, batch_size=32, mcts_iters=100, c_puct=1.0,
replay_length=100000, num_evaluate_games=500,
win_rate=0.55, verbose=True, restore_step=None,
self_play_file_path=None):
"""Trains AlphaGo on the game.
Parameters
----------
game: object
An object that has the attributes a game needs.
create_estimator: func
Creates a trainable estimator for the game. The estimator should
have a train function.
self_play_iters: int
Number of self-play games to play each self-play step.
training_iters: int
Number of training iters to use for each training step.
checkpoint_path: str
Where to save the checkpoints to.
summary_path: str
Where to save the summaries (tensorboard) to.
alphago_steps: int
Number of steps to run the alphago loop for.
evaluate_every: int
Evaluate the network every evaluate_every steps.
batch_size: int
Batch size to train with.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS. See AlphaGo paper.
replay_length: int
The amount of training data to use. Only train on the most recent
training data.
num_evaluate_games: int
Number of games to evaluate the players for.
win_rate: float
Number between 0 and 1. Only update self-play player when training
player beats self-play player by at least this rate.
verbose: bool
Whether or not to output progress.
restore_step: int or None
If given, restore the network from the checkpoint at this step.
self_play_file_path: str or None
Where to load self play data from, if given.
"""
# TODO: Do self-play, training and evaluating in parallel.
# We use a fixed estimator (the best one that's been trained) to
# generate self-play training data. We then train the training estimator
# on that data. We produce a checkpoint every 1000 training steps. This
# checkpoint is then evaluated against the current best neural network.
# If it beats the current best network by at least 55% then it becomes
# the new best network.
# 1 is the fixed player, and 2 is the training player.
self_play_estimator = create_estimator()
training_estimator = create_estimator()
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
tf_success_rate = tf.placeholder(
tf.float32, name='success_rate_summary')
success_rate_summary = tf.summary.scalar(
'success_rate_summary', tf_success_rate)
tf_success_rate_random = tf.placeholder(
tf.float32, name='success_rate_random')
success_rate_random_summary = tf.summary.scalar(
'success_rate_random', tf_success_rate_random)
#tf_success_rate_optimal = tf.placeholder(
# tf.float32, name='success_rate_optimal')
#success_rate_optimal_summary = tf.summary.scalar(
# 'success_rate_optimal', tf_success_rate_optimal)
#merged_summary = tf.summary.merge([success_rate_summary,
# success_rate_random_summary,
# success_rate_optimal_summary])
merged_summary = tf.summary.merge([success_rate_summary,
success_rate_random_summary])
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(summary_path)
if restore_step:
restore_path = compute_checkpoint_name(restore_step, checkpoint_path)
self_play_estimator.restore(restore_path)
training_estimator.restore(restore_path)
all_losses = []
self_play_data = None
initial_step = restore_step + 1 if restore_step else 0
for alphago_step in range(initial_step, initial_step + alphago_steps):
self_play_data = generate_self_play_data(
game, self_play_estimator, mcts_iters, c_puct, self_play_iters,
verbose=verbose, data=self_play_data)
training_data = process_training_data(self_play_data, replay_length)
if len(training_data) < 100:
continue
optimise_estimator(training_estimator, training_data, batch_size,
training_iters, writer=writer, verbose=verbose)
# Evaluate the players and choose the best.
if alphago_step % evaluate_every == 0:
success_rate, success_rate_random = \
evaluate_model(game, self_play_estimator,
training_estimator, mcts_iters, c_puct,
num_evaluate_games, verbose=verbose)
summary = sess.run(merged_summary,
feed_dict=
{tf_success_rate: success_rate,
tf_success_rate_random: success_rate_random})
writer.add_summary(summary, training_estimator.global_step)
checkpoint_model(training_estimator, alphago_step, checkpoint_path)
# If training player beats self-play player by a large enough
# margin, then it becomes the new best estimator.
if success_rate > win_rate:
# Create a new self player, with the weights of the most
# recent training_estimator.
if verbose:
print("Updating self-play player.")
print("Restoring from step: {}".format(alphago_step))
self_play_estimator = create_estimator()
restore_path = compute_checkpoint_name(alphago_step,
checkpoint_path)
self_play_estimator.restore(restore_path)
return all_losses
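# Example call (a sketch only; `my_game` and `create_my_estimator` are hypothetical
# placeholders for a game object and an estimator factory, not names from this module):
#     losses = train_alphago(
#         game=my_game,
#         create_estimator=create_my_estimator,
#         self_play_iters=100,
#         training_iters=1000,
#         checkpoint_path='checkpoints/run1/',
#         summary_path='summaries/run1/',
#         alphago_steps=50,
#         mcts_iters=100,
#         c_puct=1.0,
#     )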
def optimise_estimator(estimator, training_data, batch_size, training_iters,
mode='reinforcement', writer=None, verbose=True):
summary = estimator.train(training_data, batch_size, training_iters,
mode=mode, writer=writer, verbose=verbose)
return summary
def evaluate_model(game, player1, player2, mcts_iters, c_puct, num_games,
verbose=True):
# Checkpoint the model.
# TODO: Implement evaluation
# TODO: Choose tau more systematically.
if verbose:
print("Evaluating. Self-player vs training, then training vs "
"self-player")
wins1, wins2, draws = evaluate_estimators_in_both_positions(
game, player1.create_estimate_fn(), player2.create_estimate_fn(),
mcts_iters, c_puct, num_games, tau=0.01, verbose=verbose)
if verbose:
print("Self-play player wins: {}, Training player wins: {}, "
"Draws: {}".format(wins1, wins2, draws))
success_rate = (wins2 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Win + draw rate for training player: {}".format(
success_rate))
# Also evaluate against a random player
wins1, wins2, draws = evaluate_mcts_against_random_player(
game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
tau=0.01, verbose=verbose)
success_rate_random = (wins1 + draws) / (wins1 + wins2 + draws)
if verbose:
print("Training player vs random. Wins: {}, Losses: {}, "
"Draws: {}".format(wins1, wins2, draws))
## Also evaluate against an optimal player
#wins1, wins2, draws = evaluate_mcts_against_optimal_player(
# game, player2.create_estimate_fn(), mcts_iters, c_puct, num_games,
# tau=0.1, verbose=verbose)
#success_rate_optimal = (wins1 + draws) / (wins1 + wins2 + draws)
#if verbose:
# print("Training player vs optimal. Wins: {}, Losses: {}, "
# "Draws: {}".format(wins1, wins2, draws))
#return success_rate, success_rate_random, success_rate_optimal
return success_rate, success_rate_random
def checkpoint_model(player, step, path):
"""Checkpoint the training player.
"""
checkpoint_name = compute_checkpoint_name(step, path)
player.save(checkpoint_name)
def evaluate_mcts_against_optimal_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: OptimalPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: OptimalPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_mcts_against_random_player(game, estimator, mcts_iters,
c_puct, num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau),
2: RandomPlayer(game)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: RandomPlayer(game),
2: MCTSPlayer(game, estimator, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def evaluate_estimators_in_both_positions(game, estimator1, estimator2,
mcts_iters, c_puct,
num_evaluate_games, tau,
verbose=True):
# Evaluate estimator1 vs estimator2.
players = {1: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 = player1_results[1]
wins2 = player1_results[-1]
draws = player1_results[0]
# Evaluate estimator2 vs estimator1.
players = {1: MCTSPlayer(game, estimator2, mcts_iters, c_puct, tau=tau),
2: MCTSPlayer(game, estimator1, mcts_iters, c_puct, tau=tau)}
player1_results, _ = evaluate(game, players, num_evaluate_games,
verbose=verbose)
wins1 += player1_results[-1]
wins2 += player1_results[1]
draws += player1_results[0]
return wins1, wins2, draws
def generate_self_play_data(game, estimator, mcts_iters, c_puct, num_iters,
data=None, verbose=True):
"""Generates self play data for a number of iterations for a given
estimator. Saves to save_file_path, if given.
"""
# if save_file_path is not None:
# with open(save_file_path, 'r') as f:
# data = json.load(save_file_path)
# index = max(data.keys()) + 1
if data is not None:
index = max(data.keys()) + 1
else:
data = OrderedDict()
index = 0
# Collect self-play training data using the best estimator.
disable_tqdm = False if verbose else True
for _ in tqdm(range(num_iters), disable=disable_tqdm):
data[index] = self_play(
game, estimator.create_estimate_fn(), mcts_iters, c_puct)
index += 1
# if save_file_path is not None:
# with open(save_file_path, 'w') as f:
# json.dump(data, f)
return data
def self_play(game, estimator, mcts_iters, c_puct):
"""Plays a single game using MCTS to choose actions for both players.
Parameters
----------
game: Game
An object representing the game to be played.
estimator: func
An estimate function.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS.
Returns
-------
game_state_list: list
A list of game states encountered in the self-play game. Starts
with the initial state and ends with a terminal state.
action_probs_list: list
A list of action probability dictionaries, as returned by MCTS
each time the algorithm has to take an action. The ith action
probabilities dictionary corresponds to the ith game_state, and
action_probs_list has length one less than game_state_list,
since we don't have to move in a terminal state.
"""
node = MCTSNode(game.initial_state, game.current_player(game.initial_state))
game_state_list = [node.game_state]
action_probs_list = []
action_list = []
move_count = 0
while not node.is_terminal:
# TODO: Choose this better.
tau = 1
if move_count >= 10:
tau = 1 / (move_count - 10 + 1)
# First run MCTS
# Source: savi-dev/keystone
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import sys
import eventlet.wsgi
import routes.middleware
import ssl
import webob.dec
import webob.exc
from keystone.common import logging
from keystone.common import utils
from keystone import exception
from keystone.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, application, host=None, port=None, threads=1000):
self.application = application
self.host = host or '0.0.0.0'
self.port = port or 0
self.pool = eventlet.GreenPool(threads)
self.socket_info = {}
self.greenthread = None
self.do_ssl = False
self.cert_required = False
def start(self, key=None, backlog=128):
"""Run a WSGI server with the given application."""
LOG.debug('Starting %(arg0)s on %(host)s:%(port)s' %
{'arg0': sys.argv[0],
'host': self.host,
'port': self.port})
socket = eventlet.listen((self.host, self.port), backlog=backlog)
if key:
self.socket_info[key] = socket.getsockname()
# SSL is enabled
if self.do_ssl:
if self.cert_required:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
sslsocket = eventlet.wrap_ssl(socket, certfile=self.certfile,
keyfile=self.keyfile,
server_side=True,
cert_reqs=cert_reqs,
ca_certs=self.ca_certs)
socket = sslsocket
self.greenthread = self.pool.spawn(self._run, self.application, socket)
def set_ssl(self, certfile, keyfile=None, ca_certs=None,
cert_required=True):
self.certfile = certfile
self.keyfile = keyfile
self.ca_certs = ca_certs
self.cert_required = cert_required
self.do_ssl = True
def kill(self):
if self.greenthread:
self.greenthread.kill()
def wait(self):
"""Wait until all servers have completed running."""
try:
self.pool.waitall()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
log = logging.getLogger('eventlet.wsgi.server')
eventlet.wsgi.server(socket, application, custom_pool=self.pool,
log=WritableLogger(log))
class Request(webob.Request):
pass
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import nova.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls()
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(detail='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
class Application(BaseApplication):
@webob.dec.wsgify
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
LOG.debug('arg_dict: %s', arg_dict)
# allow middleware up the stack to provide context & params
context = req.environ.get('openstack.context', {})
context['query_string'] = dict(req.params.iteritems())
params = req.environ.get('openstack.params', {})
params.update(arg_dict)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(vish): make sure we have no unicode keys for py2.6.
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning("Authorization failed. %s from %s"
% (e, req.environ['REMOTE_ADDR']))
return render_exception(e)
except exception.Error as e:
LOG.warning(e)
return render_exception(e)
except Exception as e:
logging.exception(e)
return render_exception(exception.UnexpectedError(exception=e))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, basestring):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
return render_response(body=result)
def _normalize_arg(self, arg):
return str(arg).replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return dict([(self._normalize_arg(k), v)
for (k, v) in d.iteritems()])
def assert_admin(self, context):
if not context['is_admin']:
try:
user_token_ref = self.token_api.get_token(
context=context, token_id=context['token_id'])
except exception.TokenNotFound:
raise exception.Unauthorized()
creds = user_token_ref['metadata'].copy()
try:
creds['user_id'] = user_token_ref['user'].get('id')
except AttributeError:
logging.debug('Invalid user')
raise exception.Unauthorized()
try:
creds['tenant_id'] = user_token_ref['tenant'].get('id')
except AttributeError:
logging.debug('Invalid tenant')
raise exception.Unauthorized()
# NOTE(vish): this is pretty inefficient
creds['roles'] = [self.identity_api.get_role(context, role)['name']
for role in creds.get('roles', [])]
# Accept either is_admin or the admin role
self.policy_api.enforce(context, creds, 'admin_required', {})
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key, value)
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug(line)
LOG.debug('')
resp = req.get_response(self.application)
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in resp.headers.iteritems():
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
# Source: chrisspen/homebot -- src/ros/src/ros_homebot/nodes/arduino_relay.py
#!/usr/bin/env python
from __future__ import print_function
#import time
from math import pi
# import traceback
import os
import sys
import time
import threading
# import cPickle as pickle
from functools import partial
#from math import pi, sin, cos
#import numpy as np
import rospy
import tf
#import tf2_ros
#http://docs.ros.org/api/sensor_msgs/html/msg/Imu.html
from sensor_msgs.msg import Imu
#http://wiki.ros.org/std_msgs
from sensor_msgs.msg import JointState
from std_msgs.msg import Header, String, UInt16MultiArray, Bool, Int16
#from std_srvs.srv import Empty, EmptyResponse
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion, Point
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus#, KeyValue
from ros_homebot_python.node import say
from ros_homebot_python import constants as c
#from ros_homebot_python.node import BaseArduinoNode
OK = DiagnosticStatus.OK # 0
WARN = DiagnosticStatus.WARN # 1
ERROR = DiagnosticStatus.ERROR # 2
STALE = DiagnosticStatus.STALE # 3
IMU_CALIBRATION_FN = 'imu_calibration.pickle'
V0 = 'v0'
V1 = 'v1'
status_id_to_name = {
OK: 'OK',
WARN: 'WARN',
ERROR: 'ERROR',
STALE: 'STALE',
}
def ltof(values):
"""
Converts the special integer-encoded doubles back into Python floats.
See the Arduino's equivalent ftol().
"""
assert isinstance(values, (tuple, list))
return [int(_)/1000. for _ in values]
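# For example, ltof(['1500', '-250', '3141']) returns [1.5, -0.25, 3.141]; the
# Arduino-side ftol() presumably scales each float by 1000 into an int, and ltof
# undoes that scaling.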
# Based on https://goo.gl/mY0th1
# http://wiki.ros.org/tf2/Tutorials/Writing%20a%20tf2%20broadcaster%20%28Python%29
# http://wiki.ros.org/navigation/Tutorials/RobotSetup/Odom
# http://answers.ros.org/question/79851/python-odometry/
class ArduinoRelay:
cache_dir = '~/.homebot_cache/torso_relay'
# Covariance
#P = np.mat(np.diag([0.0]*3))
def __init__(self):
rospy.init_node('arduino_relay')
self._imu_data = {}
self._lock = threading.RLock()
# self.imu_calibration_loaded = False
# self.imu_calibration_loaded_time = rospy.Time(0)
self.diagnostics_msg_count = 0
self.diagnostics_buffer = []
# http://wiki.ros.org/tf/Tutorials/Writing%20a%20tf%20broadcaster%20%28Python%29
self.tf_br = tf.TransformBroadcaster()
#self.tf2_br = tf2_ros.TransformBroadcaster()
#rospy.Service('~reset_odometry', Empty, self.reset_odometry)
## Publishers.
self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=10)
self.odometry_pub = rospy.Publisher('/odom', Odometry, queue_size=10)
self.imu_calibration_load_pub = rospy.Publisher('/torso_arduino/imu_calibration_load', UInt16MultiArray, queue_size=1)
self.imu_pub = rospy.Publisher('/imu_data', Imu, queue_size=10)
self.joint_pub = rospy.Publisher('joint_states', JointState, queue_size=10)
self._joint_pub_lock = threading.RLock()
self.last_pan_angle = None
self.last_tilt_angle = None
self.received_angles = False
self._joint_pub_thread = threading.Thread(target=self.publish_joints_thread)
self._joint_pub_thread.daemon = True
self._joint_pub_thread.start()
## Head Subscribers.
rospy.Subscriber('/head_arduino/diagnostics_relay', String, partial(self.on_diagnostics_relay, prefix='head'))
rospy.Subscriber('/head_arduino/pan_degrees', Int16, self.on_head_pan_degrees)
rospy.Subscriber('/head_arduino/tilt_degrees', Int16, self.on_head_tilt_degrees)
## Torso Subscribers.
rospy.Subscriber('/torso_arduino/diagnostics_relay', String, partial(self.on_diagnostics_relay, prefix='torso'))
# rospy.Subscriber('/torso_arduino/imu_calibration_save', UInt16MultiArray, self.on_imu_calibration_save)
rospy.Subscriber('/torso_arduino/imu_relay', String, self.on_imu_relay)
rospy.Subscriber('/torso_arduino/odometry_relay', String, self.on_odometry_relay)
rospy.Subscriber('/torso_arduino/power_shutdown', Bool, self.on_power_shutdown)
self._odom_lock = threading.RLock()
self.pos = None
self.ori = None
# self._motor_target_left = None
# self._motor_target_right = None
# rospy.Subscriber('/torso_arduino/motor_target_a', Int16, partial(self.on_motor_target, side='left'))
# rospy.Subscriber('/torso_arduino/motor_target_b', Int16, partial(self.on_motor_target, side='right'))
# self._motor_encoder_left = None
# self._motor_encoder_right = None
# rospy.Subscriber('/torso_arduino/motor_encoder_a', Int16, partial(self.on_motor_encoder, side='left'))
# rospy.Subscriber('/torso_arduino/motor_encoder_b', Int16, partial(self.on_motor_encoder, side='right'))
## Begin IO.
rospy.spin()
@property
def imu_calibration_filename(self):
cache_dir = os.path.expanduser(self.cache_dir)
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
fn = os.path.join(cache_dir, IMU_CALIBRATION_FN)
return fn
# def load_imu_calibration(self):
# """
# Automatically called once after the first diagnostic message is received.
# Per Adafruit's documentation:
# "One thing to keep in mind though is that the sensor isn't necessarily 'plug and play' with
# loading the calibration data, in particular the magnetometer needs to be recalibrated even if
# the offsets are loaded. The magnetometer calibration is very dynamic so saving the values
# once might"
# """
# try:
# fn = self.imu_calibration_filename
# if os.path.isfile(fn):
# rospy.loginfo('Loading imu calibration %s...' % fn)
# with open(fn, 'r') as fin:
# msg = pickle.load(fin)
# rospy.loginfo('Sending calibration:')
# rospy.loginfo(msg)
# self.imu_calibration_load_pub.publish(msg)
# self.imu_calibration_loaded = True
# self.imu_calibration_loaded_time = rospy.Time.now()
# rospy.loginfo('Sent.')
# else:
# rospy.loginfo('No saved imu calibration.')
# except Exception as exc:
# traceback.print_exc()
# finally:
# self.imu_calibration_loaded = True
def on_head_pan_degrees(self, msg):
"""
Re-publishes the pan angle as a standard JointState message.
"""
# rospy.loginfo('pan: %s' % msg.data)
with self._joint_pub_lock:
self.last_pan_angle = msg.data*pi/180.
self.received_angles = True
self.publish_joints()
def on_head_tilt_degrees(self, msg):
"""
Re-publishes the tilt angle as a standard JointState message.
"""
# rospy.loginfo('tilt: %s' % msg.data)
with self._joint_pub_lock:
self.last_tilt_angle = msg.data*pi/180.
self.received_angles = True
self.publish_joints()
def publish_joints_thread(self):
time.sleep(3)
while 1:
if self.received_angles:
self.publish_joints()
time.sleep(1)
def publish_joints(self):
if self.last_tilt_angle is None:
rospy.logwarn('No tilt angle, aborting joint publish.')
return
if self.last_pan_angle is None:
rospy.logwarn('No pan angle, aborting joint publish.')
return
with self._joint_pub_lock:
joint_state = JointState()
joint_state.header = Header()
joint_state.header.stamp = rospy.Time.now()
joint_state.name = [
c.FOOTPRINT_TO_TORSO_JOINT,
c.TORSO_TO_NECK_JOINT,
c.NECK_TO_HEAD_JOINT,
c.HEAD_TO_CAMERA_JOINT,
]
joint_state.position = [
0,
self.last_pan_angle,
self.last_tilt_angle,
0,
]
joint_state.velocity = []
joint_state.effort = []
self.joint_pub.publish(joint_state)
def on_power_shutdown(self, msg):
rospy.loginfo('Received shutdown signal. Issuing halt command in 3 seconds...')
try:
say(c.SYSTEM_SHUTDOWN_SPEECH)
except Exception as exc: # pylint: disable=broad-except
rospy.logerr('Unable to speak about shutdown: %s', exc)
time.sleep(3)
os.system('sudo halt')
# After halt is performed, all ROS nodes will be killed.
# The torso Arduino will then wait a few seconds to allow Linux to clean up all processes, and then it will kill all system power.
# See the deadman flag that triggers the call to power_controller.shutdown().
# def on_imu_calibration_save(self, msg):
# #print('Received imu calibration:', msg)
# if sum(msg.data) == 0:
# rospy.logwarn('Ignoring blank calibration.')
# self.load_imu_calibration()
# return
# fn = self.imu_calibration_filename
# with open(fn, 'w') as fout:
# pickle.dump(msg, fout)
def on_imu_relay(self, msg):
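# One IMU sample is assumed to arrive as three colon-separated messages, one per sensor
# type: 'e:<ex>:<ey>:<ez>' (Euler angles), 'g:<gx>:<gy>:<gz>' (gyroscope) and
# 'a:<ax>:<ay>:<az>' (accelerometer), with values integer-encoded as decoded by ltof().
# The 'a' message is treated as the final segment and triggers the Imu publish below.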
parts = msg.data.split(':')
# Validate type.
typ = parts[0]
assert typ in 'aeg', 'Invalid typ: %s' % typ
# Convert the integers to the original floats.
nums = ltof(parts[1:])
for num, axis in zip(nums, 'xyz'):
self._imu_data['%s%s' % (typ, axis)] = num
# If we've received the final segment, re-publish the complete IMU message.
if typ == 'a':
# https://docs.ros.org/api/sensor_msgs/html/msg/Imu.html
imu_msg = Imu()
imu_msg.header = Header()
imu_msg.header.stamp = rospy.Time.now()
imu_msg.header.frame_id = c.BASE_LINK #TODO
# Our sensor returns Euler angles in degrees, but ROS requires radians.
# http://answers.ros.org/question/69754/quaternion-transformations-in-python/
roll = self._imu_data['ex'] * pi / 180.
pitch = self._imu_data['ey'] * pi / 180.
yaw = self._imu_data['ez'] * pi / 180.
quaternion = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
imu_msg.orientation.x = quaternion[0]
imu_msg.orientation.y = quaternion[1]
imu_msg.orientation.z = quaternion[2]
imu_msg.orientation.w = quaternion[3]
imu_msg.orientation_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
imu_msg.angular_velocity.x = self._imu_data['gx']
imu_msg.angular_velocity.y = self._imu_data['gy']
imu_msg.angular_velocity.z = self._imu_data['gz']
imu_msg.angular_velocity_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
imu_msg.linear_acceleration.x = self._imu_data['ax']
imu_msg.linear_acceleration.y = self._imu_data['ay']
imu_msg.linear_acceleration.z = self._imu_data['az']
imu_msg.linear_acceleration_covariance = [1, 0.001, 0.001, 0.001, 1, 0.001, 0.001, 0.001, 1]
self.imu_pub.publish(imu_msg)
def on_diagnostics_relay(self, msg, prefix):
"""
The Arduino has limited RAM and an even more limited serial buffer, so it can't send complex ROS structures like DiagnosticArrays.
So instead, it publishes diagnostic data via a key/value pair formatted in a simple string,
which we convert to a proper diagnostic message.
"""
#print('diagnostics.msg:', msg)
self.diagnostics_msg_count += 1
# if (rospy.Time.now() - self.imu_calibration_loaded_time).secs >= 300:
# self.load_imu_calibration()
# Aggregate single-character messages into a complete message.
# if not msg.data:
# #print('Received empty message.')
# return
# elif msg.data[0] == '^':
# #print('Received message start.')
# self.diagnostics_buffer = []
# return
# elif msg.data[0] == '$':
# #print('Received message end.')
# msg.data = ''.join(self.diagnostics_buffer)
# else:
# #print('Recieved %i chars.' % len(msg.data))
# self.diagnostics_buffer.append(msg.data)
# return
# Extract parts.
# print('Message length:', len(msg.data))
# print('Message data:', msg.data)
parts = msg.data.split(':')
if len(parts) < 2:
rospy.logerr('Malformed diagnostics message.')
return
# Complete name part.
name = '%s: %s' % (prefix, parts[0].strip())
# Complete level part.
try:
level = int(parts[1]) # OK|WARN|ERROR|STALE
assert level in range(4)
except (TypeError, ValueError, AssertionError) as exc:
rospy.logerr('Malformed level: "%s"', parts[1])
return
# Complete message part.
message = ''
if len(parts) >= 3:
message = parts[2].strip()
if message == '?':
message = ''
if not message:
# If not given, default the message to the name equivalent of the level.
message = status_id_to_name.get(level, '')
# Construct and send diagnostics array.
# http://docs.ros.org/api/diagnostic_msgs/html/msg/DiagnosticStatus.html
array = DiagnosticArray()
array.status = [
DiagnosticStatus(name=name, level=level, message=message)
]
with self._lock:
self.diagnostics_pub.publish(array)
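    # The odometry relay below arrives in two colon-separated parts: one tagged
    # with the module-level constant V0 carrying position (x, y, z, th) and one
    # tagged with V1 carrying velocity (vx, vy, vz, vth). The concrete values of
    # V0/V1 are defined elsewhere in this module, so this is only schematic:
    #
    #   '<V0>:12:34:0:157'  -> cached as self._odometry_v0
    #   '<V1>:5:0:0:2'      -> assembles and publishes the full Odometry message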
def on_odometry_relay(self, msg):
parts = msg.data.split(':')
if len(parts) < 5:
            rospy.logerr('Malformed odometry message: %s', msg.data)
return
# Validate type.
typ = parts[0]
assert typ in (V0, V1), 'Invalid type: %s' % typ
# Validate numbers.
nums = ltof(parts[1:])
# Save type parts.
if typ == V0:
# Position.
# x,y,z,th
self._odometry_v0 = nums
else:
# Velocity.
# vx,vy,vz,vth
self._odometry_v1 = nums
# Combine and publish a complete odometry message on the receipt of the last part.
if typ == V1:
current_time = rospy.Time.now()
# print('position:', self._odometry_v0)
x, y, z, th = self._odometry_v0
# print('velocity:', self._odometry_v1)
vx, vy, vz, vth = self._odometry_v1
# since all odometry is 6DOF we'll need a quaternion created from yaw
#geometry_msgs::Quaternion odom_quat = tf::createQuaternionMsgFromYaw(th);
odom_quat = Quaternion(*tf.transformations.quaternion_from_euler(0, 0, th))
# https://docs.ros.org/kinetic/api/nav_msgs/html/msg/Odometry.html
msg = Odometry()
msg.header.stamp = current_time
msg.header.frame_id = c.ODOM
msg.child_frame_id = c.BASE_LINK
msg.pose.pose.position = Point(x, y, z)
msg.pose.pose.orientation = odom_quat
msg.pose.covariance = [
1, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 1, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 1, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 1, 0.001, 0.001,
        char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.jointRadius', self.jointRadius.value() )
except:
pass
def set_joint_display(self):
mode = self.jointMode.currentIndex()
if self.char is not None:
try:
mc.setAttr ( self.char + '.display_Joint', mode )
except:
pass
def set_geo_display(self):
mode = self.geoMode.currentIndex()
if self.char is not None:
try:
mc.setAttr ( self.char + '.display_Geo', mode )
except:
pass
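    # Note on the int(checkState())/2 pattern used by the show_* methods below:
    # Qt reports Qt.Unchecked as 0 and Qt.Checked as 2, so dividing by 2 maps the
    # checkbox state to the 0/1 value written to the Maya attribute (the partially
    # checked state, value 1, does not appear to be used by these checkboxes).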
def show_character(self, *args):
char = self.am.get_active_char( )
if char is not None:
try:
mc.setAttr( char + '.v', int(self.showChar.checkState())/2 )
except:
pass
else:
self.ui_update()
def show_rig(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_Rig', int(self.showRig.checkState())/2 )
except:
pass
def show_geo(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_Geo', int(self.showGeo.checkState())/2 )
except:
pass
def show_joints(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_Joints', int(self.showJoints.checkState())/2 )
except:
pass
def show_guides(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_Guides', int(self.showGuides.checkState())/2 )
except:
pass
def show_mocap(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_Mocap', int(self.showMocap.checkState())/2 )
except:
pass
def show_upVecs(self, *args):
char = self.am.get_active_char()
if char is not None:
try:
mc.setAttr( char + '.show_UpVectors', int(self.showUpVecs.checkState())/2 )
except:
pass
def update_value_widget( self, char, widget, attribute ):
try:
gs = mc.getAttr( char + '.' + attribute )
widget.setValue( gs )
except:
#mc.warning('aniMeta: Can not find globalScale attribute.')
pass
def update_state_widget( self, char, widget, attribute ):
try:
sj = mc.getAttr( char + '.' + attribute )
widget.setCheckState(self.get_state(sj))
except:
#mc.warning('aniMeta: Can not find globalScale attribute.')
pass
def update_enum_widget( self, char, widget, attribute ):
try:
sj = mc.getAttr( char + '.' + attribute )
widget.setCurrentIndex(sj)
except:
#mc.warning('aniMeta: Can not find globalScale attribute.')
pass
def ui_update( self, char=None ):
if char is None:
char = self.am.get_active_char()
if char is not None:
if mc.objExists( char ):
self.ui_enable( True )
v = mc.getAttr(char + '.v')
data = self.am.get_metaData(char)
######################################################################
# Update the Display Options Widgets
if 'RigState' in data:
state = data['RigState']
self.pickerWidget.setEnabled( state-1 )
self.update_value_widget( char, self.globalScale, 'globalScale' )
self.update_value_widget( char, self.ctrlScale, 'globalCtrlScale' )
self.update_value_widget( char, self.jointRadius, 'jointRadius' )
self.update_state_widget( char, self.showJoints, 'show_Joints' )
self.update_state_widget( char, self.showGuides, 'show_Guides' )
self.update_state_widget( char, self.showRig, 'show_Rig' )
self.update_state_widget( char, self.showGeo, 'show_Geo' )
self.update_state_widget( char, self.showMocap, 'show_Mocap' )
self.update_state_widget( char, self.showUpVecs, 'show_UpVectors' )
self.update_enum_widget( char, self.jointMode, 'display_Joint' )
self.update_enum_widget( char, self.geoMode, 'display_Geo' )
self.showChar.setCheckState( self.get_state(v) )
# Save the current char
self.char = char
metaData = self.am.get_metaData(char)
if 'RigState' in metaData:
rigState = metaData['RigState']
if rigState == kRigStateControl:
self.modeControls.setEnabled(False)
self.modeControls.setStyleSheet(self.style_active)
self.modeControls.setToolTip( 'Rig is in control mode.')
self.modeGuides.setEnabled(True)
self.modeGuides.setStyleSheet(self.style_not_active)
self.modeGuides.setToolTip( 'Switch the rig to guide mode.')
self.lockGuides1.setEnabled(False)
self.lockGuides2.setEnabled(False)
self.lockGuides3.setEnabled(False)
else:
self.modeControls.setEnabled(True)
self.modeControls.setStyleSheet(self.style_not_active)
self.modeControls.setToolTip( 'Switch the rig to control mode.')
self.modeGuides.setEnabled(False)
self.modeGuides.setStyleSheet(self.style_active)
self.modeGuides.setToolTip( 'Rig is in guide mode.')
self.lockGuides1.setEnabled(True)
self.lockGuides2.setEnabled(True)
self.lockGuides3.setEnabled(True)
# Arm IK R
if self.arm_mode_R == kFK:
self.button_ik_arm_R.setEnabled( True )
self.button_ik_arm_R.setStyleSheet(self.style_not_active)
self.button_fk_arm_R.setEnabled( False )
self.button_fk_arm_R.setStyleSheet(self.style_active)
for widget in self.buttons_arm_ik_R:
widget.setVisible( False )
for widget in self.buttons_arm_fk_R:
widget.setVisible( True )
else:
self.button_ik_arm_R.setEnabled( False )
self.button_ik_arm_R.setStyleSheet(self.style_active)
self.button_fk_arm_R.setEnabled( True )
self.button_fk_arm_R.setStyleSheet(self.style_not_active)
for widget in self.buttons_arm_ik_R:
widget.setVisible( True )
for widget in self.buttons_arm_fk_R:
widget.setVisible( False )
# Arm IK L
if self.arm_mode_L == kFK:
self.button_ik_arm_L.setEnabled( True )
self.button_ik_arm_L.setStyleSheet(self.style_not_active)
self.button_fk_arm_L.setEnabled( False )
self.button_fk_arm_L.setStyleSheet(self.style_active)
for widget in self.buttons_arm_ik_L:
widget.setVisible( False )
for widget in self.buttons_arm_fk_L:
widget.setVisible( True )
else:
self.button_ik_arm_L.setEnabled( False )
self.button_ik_arm_L.setStyleSheet(self.style_active)
self.button_fk_arm_L.setEnabled( True )
self.button_fk_arm_L.setStyleSheet(self.style_not_active)
for widget in self.buttons_arm_ik_L:
widget.setVisible( True )
for widget in self.buttons_arm_fk_L:
widget.setVisible( False )
# Leg IK R
if self.leg_mode_R == kFK:
self.button_ik_leg_R.setEnabled( True )
self.button_ik_leg_R.setStyleSheet(self.style_not_active)
self.button_fk_leg_R.setEnabled( False )
self.button_fk_leg_R.setStyleSheet(self.style_active)
for widget in self.buttons_leg_ik_R:
widget.setVisible( False )
for widget in self.buttons_leg_fk_R:
widget.setVisible( True )
else:
self.button_ik_leg_R.setEnabled( False )
self.button_ik_leg_R.setStyleSheet(self.style_active)
self.button_fk_leg_R.setEnabled( True )
self.button_fk_leg_R.setStyleSheet(self.style_not_active)
for widget in self.buttons_leg_ik_R:
widget.setVisible( True )
for widget in self.buttons_leg_fk_R:
widget.setVisible( False )
# Leg IK L
if self.leg_mode_L == kFK:
self.button_ik_leg_L.setEnabled( True )
self.button_ik_leg_L.setStyleSheet(self.style_not_active)
self.button_fk_leg_L.setEnabled( False )
self.button_fk_leg_L.setStyleSheet(self.style_active)
for widget in self.buttons_leg_ik_L:
widget.setVisible( False )
for widget in self.buttons_leg_fk_L:
widget.setVisible( True )
else:
self.button_ik_leg_L.setEnabled( False )
self.button_ik_leg_L.setStyleSheet(self.style_active)
self.button_fk_leg_L.setEnabled( True )
self.button_fk_leg_L.setStyleSheet(self.style_not_active)
for widget in self.buttons_leg_ik_L:
widget.setVisible( True )
for widget in self.buttons_leg_fk_L:
widget.setVisible( False )
self.pickerLayout.update()
else:
self.ui_enable( False )
else:
self.ui_enable( False )
def ui_enable(self, mode):
self.showChar.setEnabled(mode)
self.showJoints.setEnabled(mode)
self.showGeo.setEnabled(mode)
self.showRig.setEnabled(mode)
self.showMocap.setEnabled(mode)
self.showUpVecs.setEnabled(mode)
self.globalScale.setEnabled(mode)
self.ctrlScale.setEnabled(mode)
self.jointRadius.setEnabled(mode)
self.jointMode.setEnabled(mode)
self.geoMode.setEnabled(mode)
self.modeControls.setEnabled(mode)
self.modeGuides.setEnabled(mode)
self.lockGuides1.setEnabled(mode)
self.lockGuides2.setEnabled(mode)
self.lockGuides3.setEnabled(mode)
self.pickerWidget.setEnabled(mode)
kLibPose, kLibAnim, kLibRig = range(3)
class LibTab(QWidget):
charList = None
resized = Signal()
pose_path = None
pose = None
def __init__(self, *argv, **keywords):
super(LibTab, self).__init__( )
self.am = AniMeta()
self.rig = Rig()
mainLayout = QVBoxLayout( self )
self.setLayout(mainLayout)
self.menu = QMenu( self )
l_widget = QWidget()
self.l = QVBoxLayout( l_widget )
self.scrollArea = QScrollArea()
self.layout().addWidget( self.scrollArea )
self.scrollArea.setWidget( l_widget )
self.scrollArea.setWidgetResizable(True)
self.kPose, self.kAnim = range(2)
self.pose_column_count = 3
self.anim_column_count = 3
self.rig_column_count = 3
self.pose_root = self.am.folder_pose
self.pose_path = self.am.folder_pose
self.anim_root = self.am.folder_anim
self.anim_path = self.am.folder_anim
self.rig_root = self.am.folder_rig
self.rig_path = self.am.folder_rig
self.pose_grid = QGridLayout()
self.anim_grid = QGridLayout()
self.rig_grid = QGridLayout()
self.button_height = 28
self.button_width = 128
self.poses()
self.anims()
self.rigs()
for section in [kLibPose, kLibAnim, kLibRig ]:
self.tree_refresh( section )
self.refresh( section )
self.l.addStretch()
def resizeEvent( self, event ):
self.check_columns( kLibPose )
self.check_columns( kLibAnim )
self.check_columns( kLibRig )
def poses( self ):
########################################
# Poses
self.pose_frame = FrameWidget( 'Poses', None, 600 )
self.l.addWidget( self.pose_frame )
widget = QWidget( self.pose_frame )
vLayout = QVBoxLayout( self )
self.pose_frame.setLayout( vLayout )
hLayout = QHBoxLayout( self )
vLayout.addLayout( hLayout )
# Buttons
button1 = QPushButton( 'Save' )
button2 = QPushButton( 'Load' )
button1.clicked.connect( self.pose_export_dialog )
for button in [ button1, button2 ]:
button.setMinimumWidth( self.button_width )
button.setMaximumWidth( self.button_width )
button.setMinimumHeight( self.button_height )
button.setMaximumHeight( self.button_height )
hLayout.addWidget( button1 )
hLayout.addWidget( button2 )
hLayout.addStretch()
# Split Layout
self.pose_split = QSplitter( Qt.Horizontal )
self.pose_split.splitterMoved.connect( partial( self.check_columns, kLibPose ) )
# Tree View
self.pose_tree_scroll = QScrollArea()
#self.pose_tree_scroll.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOn )
self.pose_tree_scroll.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.pose_tree_scroll.setWidgetResizable( True )
self.pose_tree_view = aniMetaTreeWidget()
self.pose_tree_view.setHeaderLabel( 'Pose Folders' )
self.pose_tree_view.installEventFilter( self )
self.pose_tree_view.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.pose_tree_view.customContextMenuRequested.connect( self.pose_tree_ctx_menu )
self.pose_tree_scroll.setWidget( self.pose_tree_view )
self.pose_tree_view.selectionModel().selectionChanged.connect( partial ( self.tree_select, kLibPose ) )
self.pose_split.addWidget( self.pose_tree_scroll )
# Pose Panel
self.pose_scroll = QScrollArea()
self.pose_scroll.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOn )
self.pose_scroll.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.pose_scroll.setWidgetResizable( True )
self.pose_split.addWidget( self.pose_scroll )
vLayout.addWidget( self.pose_split )
self.pose_frame.setCollapsed( True )
# Poses
########################################
########################################
# Animation
def anims( self ):
self.anim_frame = FrameWidget( 'Animation', None, 600 )
self.l.addWidget( self.anim_frame )
widget = QWidget( self.anim_frame )
vLayout = QVBoxLayout( self )
self.anim_frame.setLayout( vLayout )
hLayout = QHBoxLayout( self )
vLayout.addLayout( hLayout )
# Buttons
button1 = QPushButton( 'Save' )
button2 = QPushButton( 'Load' )
button1.clicked.connect( self.export_anim_dialog )
for button in [ button1, button2 ]:
button.setMinimumWidth( self.button_width )
button.setMaximumWidth( self.button_width )
button.setMinimumHeight( self.button_height )
button.setMaximumHeight( self.button_height )
hLayout.addWidget( button1 )
hLayout.addWidget( button2 )
hLayout.addStretch()
# Split Layout
self.anim_split = QSplitter( Qt.Horizontal )
self.anim_split.splitterMoved.connect( partial ( self.check_columns, kLibAnim ) )
# Tree View
self.anim_tree_scroll = QScrollArea()
#self.anim_tree_scroll.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOn )
self.anim_tree_scroll.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.anim_tree_scroll.setWidgetResizable( True )
self.anim_tree_view = aniMetaTreeWidget()
self.anim_tree_view.setHeaderLabel( 'Animation Folders' )
self.anim_tree_view.installEventFilter( self )
self.anim_tree_view.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.anim_tree_view.customContextMenuRequested.connect( self.anim_tree_ctx_menu )
self.anim_tree_scroll.setWidget( self.anim_tree_view )
self.anim_tree_view.selectionModel().selectionChanged.connect( partial ( self.tree_select, kLibAnim ) )
self.anim_split.addWidget( self.anim_tree_scroll )
# Animation Panel
self.anim_scroll = QScrollArea()
self.anim_scroll.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOn )
self.anim_scroll.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.anim_scroll.setWidgetResizable( True )
self.anim_split.addWidget( self.anim_scroll )
vLayout.addWidget( self.anim_split )
self.anim_frame.setCollapsed( True )
# Animation
########################################
########################################
# Rig Settings
def rigs( self ):
self.rig_frame = FrameWidget( 'Rigs', None, 600 )
self.l.addWidget( self.rig_frame )
widget = QWidget( self.rig_frame )
vLayout = QVBoxLayout( self )
self.rig_frame.setLayout( vLayout )
hLayout = QHBoxLayout( self )
vLayout.addLayout( hLayout )
# Buttons
button1 = QPushButton( 'Save' )
button2 = QPushButton( 'Load' )
button1.clicked.connect( self.rig_export_dialog )
for button in [ button1, button2 ]:
button.setMinimumWidth( self.button_width )
button.setMaximumWidth( self.button_width )
button.setMinimumHeight( self.button_height )
button.setMaximumHeight( self.button_height )
hLayout.addWidget( button1 )
hLayout.addWidget( button2 )
hLayout.addStretch()
# Split Layout
self.rig_split = QSplitter( Qt.Horizontal )
self.rig_split.splitterMoved.connect( partial ( self.check_columns, kLibRig ) )
# Tree View
self.rig_tree_scroll = QScrollArea()
self.rig_tree_scroll.setVerticalScrollBarPolicy( QtCore.Qt.ScrollBarAsNeeded )
self.rig_tree_scroll.setHorizontalScrollBarPolicy( QtCore.Qt.ScrollBarAlwaysOff )
self.rig_tree_scroll.setWidgetResizable( True )
self.rig_tree_view = aniMetaTreeWidget()
self.rig_tree_view.setHeaderLabel( 'Rig Settings Folders' )
self.rig_tree_view.installEventFilter( self )
self.rig_tree_view.setContextMenuPolicy( QtCore.Qt.CustomContextMenu )
self.rig_tree_view.customContextMenuRequested.connect( self.rig_tree_ctx_menu | |
another aperture.
"""
# find the room and include the room shell geometry.
# include the rest of the scene except for indoor geometries for that room.
# here is a place that light-path is necessary to be able to know what is indoor
# and what is outdoor.
raise NotImplementedError()
def aperture_files(self, black_out=False, rel_path=True):
"""Return list of files for apertures.
This list includes both geometry and modifier files. This method will raise a
ValueError if it cannot find a modifier file with the same name as the geometry
file.
Args:
            black_out (bool): Set black_out to True for "isolated" studies for aperture
groups.
            rel_path (bool): Set rel_path to False for getting full path to files. By
default the path is relative to study folder root.
"""
cfg = self._config['APERTURE']
pattern = cfg['geo_pattern']
geometry_files = self._find_files(
self.aperture_folder(full=True), pattern, rel_path
)
pattern = cfg['blk_pattern'] if black_out else cfg['mod_pattern']
modifier_files = self._find_files(
self.aperture_folder(full=True), pattern, rel_path
)
return self._match_files(modifier_files, geometry_files)
def aperture_group_files_black(self, exclude=None, rel_path=False):
"""Return list of files for black aperture groups.
Args:
exclude: Identifier of aperture groups to exclude from the list of black
aperture groups files. This input can be either a single aperture group
identifier or a list of aperture group identifiers.
rel_path: Set rel_path to True for getting full path to files. By
default the path is relative to study folder root.
"""
        if exclude is None:
            exclude = []
        elif not isinstance(exclude, (list, tuple)):
            exclude = [exclude]
states = self.aperture_groups_states(full=True)
blk_files = []
for aperture_group, ap_states in states.items():
if aperture_group in exclude:
continue
blk_file = os.path.normpath(ap_states[0]['black'])
blk_file = os.path.join(self.aperture_group_folder(full=rel_path), blk_file)
blk_files.append(self._as_posix(blk_file))
return blk_files
def scene_files(self, black_out=False, rel_path=True):
"""Return list of files for scene.
Args:
            black_out (bool): Set black_out to True for direct sunlight studies.
            rel_path (bool): Set rel_path to False for getting full path to files. By
default the path is relative to study folder root.
"""
cfg = self._config['SCENE']
pattern = cfg['geo_pattern']
geometry_files = self._find_files(
self.scene_folder(full=True), pattern, rel_path
)
pattern = cfg['blk_pattern'] if black_out else cfg['mod_pattern']
modifier_files = self._find_files(
self.scene_folder(full=True), pattern, rel_path
)
return self._match_files(modifier_files, geometry_files)
def grid_files(self, rel_path=True, group=None):
"""Return list of grid files."""
cfg = self._config['GRID']
pattern = cfg['grid_pattern']
if not group:
grid_files = self._find_files(
self.grid_folder(full=True), pattern, rel_path
)
else:
grid_files = self._find_files(
os.path.join(self.grid_folder(full=True), group), pattern, rel_path
)
return grid_files
def grid_info_files(self, rel_path=True):
"""Return list of grid information files."""
cfg = self._config['GRID']
pattern = cfg['info_pattern']
grid_info_files = self._find_files(
self.grid_folder(full=True), pattern, rel_path
)
return grid_info_files
def view_files(self, rel_path=True):
"""Return list of view files."""
cfg = self._config['VIEW']
pattern = cfg['view_pattern']
view_files = self._find_files(
self.view_folder(full=True), pattern, rel_path
)
return view_files
def view_info_files(self, rel_path=True):
"""Return list of view information files."""
cfg = self._config['VIEW']
pattern = cfg['info_pattern']
view_info_files = self._find_files(
self.view_folder(full=True), pattern, rel_path
)
return view_info_files
def receiver_files(self, rel_path=True):
"""Return list of receiver files."""
cfg = self._config['RECEIVER']
pattern = cfg['receiver_pattern']
receiver_files = self._find_files(
self.receiver_folder(full=True), pattern, rel_path
)
return receiver_files
def receiver_info_file(self, rel_path=True):
"""Return the receiver information file."""
cfg = self._config['RECEIVER']
pattern = cfg['info_pattern']
receiver_info_file = self._find_files(
self.receiver_folder(full=True), pattern, rel_path
)
return receiver_info_file[0]
def aperture_groups(self, interior=False, reload=False):
"""List of apertures groups.
Args:
interior (bool): Boolean switch to return interior dynamic apertures.
reload (bool): Dynamic geometries are loaded the first time this
method is called. To reload the files set reload to True.
Returns:
A list of dynamic apertures.
"""
if reload or self._aperture_groups_load:
# load dynamic apertures
self._load_aperture_groups()
self._aperture_groups_load = False
return self._aperture_group_interior if interior else self._aperture_group
def aperture_groups_states(self, full=False, interior=False):
"""Return states information for aperture groups.
        Args:
full: A boolean to note if the path should be a full path or a relative path
(default: False).
interior: Set to True to get the states information for the interior aperture
groups.
"""
apt_group_folder = self.aperture_group_folder(full=full, interior=interior)
if interior:
states_file = os.path.join(
apt_group_folder, self._config['INTERIOR-APERTURE-GROUP']['states'])
else:
states_file = os.path.join(
apt_group_folder, self._config['APERTURE-GROUP']['states'])
return parse_states(states_file)
def combined_receivers(self, folder='receiver', auto_mtx_path=False):
"""Write combined receiver files to folder.
This function writes a combined receiver file of the aperture groups for all
grids in the folder. It will look for the light paths (aperture groups) of the
        grid and include only aperture groups that have a mtx file. This is intended for
matrix-based daylight simulations, e.g. 3-phase, in which a view matrix is
        calculated. The combined receiver file allows multiple view matrices to be
calculated at once, while still saving the result of each aperture group in a
unique view matrix file.
        Args:
folder: A path of the target folder to write files to (default: 'receiver').
auto_mtx_path: If set to True, then the path of the view matrices will be
specified automatically.
Returns:
            A list of dictionaries, one per grid, each containing the grid identifier,
            sensor count, the path to the combined receiver rad file and the aperture
            groups included in that receiver.
"""
grids = self.grid_data_all() or []
apt_group_folder = self.aperture_group_folder(full=False)
states = self.aperture_groups_states(full=True)
rec_folder = os.path.join(
self.model_folder(True), folder
)
if not os.path.isdir(rec_folder):
os.mkdir(rec_folder)
receivers_info = []
# find the light_path for each grid
for grid in grids:
if not 'light_path' in grid or not grid['light_path']:
# The light-path for this grid is not set
# This grid will be ignored for 3/5 phase studies
warnings.warn(
'%s sensor grid has no light-path. It will not be included in three '
'or five phase studies.' % grid['name']
)
continue
light_path = grid['light_path']
# remove the static windows and non-bsdf groups
aperture_groups = [
p[0] for p in light_path if p[0] in states and 'vmtx' in states[p[0]][0]
]
if not aperture_groups:
# The light-path for this grid is static or
# non-bsdf groups
warnings.warn(
'%s sensor grid has no view matrix receiver. It will not be '
'included in three or five phase studies.' % grid['name']
)
continue
# write combined receiver for grid
receiver_file = combined_receiver(
grid['identifier'],
apt_group_folder,
aperture_groups,
rec_folder, add_output_header=auto_mtx_path
)
receivers_info.append(
{
'identifier': grid['identifier'],
'count': grid['count'],
'path': receiver_file,
'aperture_groups': aperture_groups
}
)
receivers_info_file = os.path.join(rec_folder, '_info.json')
with open(receivers_info_file, 'w') as outf:
outf.write(json.dumps(receivers_info, indent=2))
return receivers_info
def octree_scene_mapping(self, exclude_static=True, phase=2):
"""List of rad files for each state of aperture groups. These files can be used
to create the octree for each specific state for dynamic daylight simulations.
        Args:
exclude_static: A boolean to note whether static apertures are included. If
True static apertures will be treated as a state, and a list of scene
files for static apertures will be created.
phase: An integer to note which multiphase study to generate the list of
                grids for. Choose between 2, 3, and 5."""
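        # Sketch of the mapping returned for phase == 2 (names are hypothetical):
        #
        #   {
        #       'two_phase': [
        #           {
        #               'light_path': '__static_apertures__',
        #               'identifier': '__static_apertures__',
        #               'scene_files': [...],
        #               'scene_files_direct': [...]
        #           },
        #           {
        #               'light_path': 'south_window',
        #               'identifier': 'state_0',
        #               'scene_files': [...],
        #               'scene_files_direct': [...]
        #           }
        #       ]
        #   }
        #
        # For phase 3 or 5 the result is expected to also carry the 'three_phase'
        # and 'five_phase' lists built further down in this method.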
# check if phase is valid
if phase not in [2, 3, 5]:
raise ValueError(
'%s is not a valid phase. Must be 2, 3 or 5.' % phase
)
two_phase, three_phase, five_phase = [], [], []
# two phase static apertures
if self.has_aperture and not exclude_static:
scene_files = self.scene_files() + self.aperture_files()
scene_files_direct = self.scene_files(black_out=True) + self.aperture_files()
if self.has_aperture_group:
# add black aperture groups if any
scene_files += self.aperture_group_files_black()
scene_files_direct += self.aperture_group_files_black()
two_phase.append(
{
'light_path': '__static_apertures__',
'identifier': '__static_apertures__',
'scene_files': scene_files,
'scene_files_direct': scene_files_direct
}
)
if self.has_aperture_group:
states = self.aperture_groups_states(full=True)
# add scene files for each state. Static apertures and all other aperture
# groups will be black
for aperture_group, ap_states in states.items():
for state in ap_states:
if not 'tmtx' in state or ('tmtx' in state and phase == 2):
pattern = '%s$' % state['default'].replace('./', '')
scene_files = self.scene_files() + \
self.aperture_files(black_out=True) + \
self._find_files(
self.aperture_group_folder(full=True), pattern) + \
self.aperture_group_files_black(exclude=aperture_group)
scene_files_direct = self.scene_files(black_out=True) + \
self.aperture_files(black_out=True) + \
self._find_files(
self.aperture_group_folder(full=True), pattern) + \
self.aperture_group_files_black(exclude=aperture_group)
two_phase.append(
{
'light_path': aperture_group,
'identifier': state['identifier'],
'scene_files': scene_files,
'scene_files_direct': scene_files_direct
}
)
else:
# five phase
pattern = '%s$' % state['direct'].replace('./', '')
five_phase.append(
{
'light_path': aperture_group,
'identifier': state['identifier'],
'scene_files_direct': self.scene_files(black_out=True) + \
self.aperture_files(black_out=True) + \
self._find_files(self.aperture_group_folder(full=True),
pattern) + \
self.aperture_group_files_black(exclude=aperture_group)
}
)
# three phase
three_phase.append(
{
'light_path': None,
'identifier': '__three_phase__',
'scene_files': self.scene_files() + self.aperture_files(),
'scene_files_direct': self.scene_files(black_out=True) + \
self.aperture_files(black_out=True)
}
)
if phase == 2:
scene_mapping = {
'two_phase': two_phase
}
        if phase ==
there are clean components, then
#do a "soft" denoising
else:
full_matrix_to_be_used = np.vstack((noise_comps_post_filter, clean_comps_post_filter))[:,good_timepoint_inds].transpose()
noise_comps_post_filter_T_to_be_used = noise_comps_post_filter[:,good_timepoint_inds].transpose()
XT_X_Neg1_XT = imaging_utils.calculate_XT_X_Neg1_XT(full_matrix_to_be_used)
for temp_time_signal_dim in range(filtered_time_series.shape[0]):
regressed_time_signal[good_timepoint_inds,temp_time_signal_dim] = imaging_utils.partial_clean_fast(filtered_time_series_T[good_timepoint_inds,temp_time_signal_dim], XT_X_Neg1_XT, noise_comps_post_filter_T_to_be_used)
#Put back into original dimensions
regressed_time_signal = regressed_time_signal.transpose()
#Now apply interpolation
interpolated_time_signal = np.zeros(regressed_time_signal.shape)
if interpolation_method == 'spectral':
interpolated_time_signal = spectral_interpolation_fast(good_timepoints, regressed_time_signal, parc_obj.TR)
else:
for dim in range(regressed_time_signal.shape[0]):
interpolated_time_signal[dim,:] = interpolate(good_timepoints, regressed_time_signal[dim,:], interpolation_method, parc_obj.TR)
    #Now if necessary, apply additional filtering:
if high_pass == False and low_pass == False:
filtered_time_signal = interpolated_time_signal
else:
if high_pass != False and low_pass == False:
b, a = imaging_utils.construct_filter('highpass', [high_pass], parc_obj.TR, 6)
elif high_pass == False and low_pass != False:
b, a = imaging_utils.construct_filter('lowpass', [low_pass], parc_obj.TR, 6)
elif high_pass != False and low_pass != False:
b, a = imaging_utils.construct_filter('bandpass', [high_pass, low_pass], parc_obj.TR, 6)
filtered_time_signal = np.zeros(regressed_time_signal.shape)
for dim in range(regressed_time_signal.shape[0]):
filtered_time_signal[dim,:] = imaging_utils.apply_filter(b,a,regressed_time_signal[dim,:])
#Now set all the undefined timepoints to Nan
cleaned_time_signal = filtered_time_signal
cleaned_time_signal[:,bad_timepoint_inds] = np.nan
return cleaned_time_signal, good_timepoint_inds
def flexible_orth_denoise_parc(parc_obj, hpf_before_regression, scrub_criteria_dictionary, interpolation_method, noise_comps_dict, clean_comps_dict, high_pass, low_pass):
#THIS FUNCTION IS THE SAME AS FLEXIBLE DENOISE PARC,
#EXCEPT FOR HERE, THE REGRESSORS IDENTIFIED BY CLEAN
#COMPS DICT ARE REGRESSED FROM THE REGRESSORS IDENTIFIED
#BY NOISE COMPS DICT PRIOR TO THE REGRESSORS FROM NOISE COMPS
#DICT BEING USED TO CLEAN THE TIMESERIES. THIS MEANS THE MODEL
#TO CLEAN THE TIMESERIES WILL ONLY CONTAIN THE ORTHOGONALIZED
#NUISANCE VARIABLES (filtering and other options will be applied
#as per usual)
#Function inputs:
#parc_object = a parcellated timeseries object generated from
#file "imaging_utility_classes.py" which will contain both an
#uncleaned parcellated time series, and other nuisance variables
# etc. of interest
#hpf_before_regression = the cutoff frequency for an optional high
#pass filter that can be applied to the nuisance regressors (noise/clean) and the
#uncleaned time signal before any regression or scrubbing occurs. Recommended
#value would be 0.01 or False (False for if you want to skip this step)
#scrub_criteria_dictionary = a dictionary that describes how scrubbing should be
#implemented. Three main options are (1) instead of inputting a dictionary, setting this
#variable to False, which will skip scrubbing, (2) {'Uniform' : [AMOUNT_TO_KEEP, ['std_dvars', 'framewise_displacement']]},
    #which will automatically only keep the best timepoints (for when you want all subjects to be scrubbed an equivalent amount).
#This option will keep every timepoint if AMOUNT_TO_KEEP was 1, and no timepoints if it was 0. The list of confounds following
#AMOUNT_TO_KEEP must at least contain one metric (but can be as many as you want) from parc_object.confounds. If more than one
#metric is given, they will be z-transformed and their sum will be used to determine which timepoints should be
    #kept, with larger values being interpreted as noisier (WHICH MEANS THIS OPTION SHOULD ONLY BE USED WITH METRICS WHERE
#ZERO OR NEGATIVE BASED VALUES ARE FINE AND LARGE POSITIVE VALUES ARE BAD) - this option could potentially produce
    #slightly different numbers of timepoints across subjects still if the bad timepoints overlap to varying degrees with
#the number of timepoints that are dropped at the beginning of the scan. (3) {'std_dvars' : 1.2, 'framewise_displacement' : 0.5} -
#similar to the "Uniform" option, the input metrics should be found in parc_object.confounds. Here only timepoints
#with values below all specified thresholds will be kept for further analyses
    #interpolation_method: options are 'linear', 'cubic_spline' and 'spectral'.
#While scrubbed values are not included to determine any of the weights in the denoising
#model, they will still be interpolated over and then "denoised" (have nuisance variance
#removed) so that we have values to put into the optional filter at the end of processing.
    #The interpolated values only have any influence on the filtering procedure, and will be again
#removed from the time signal after filtering and thus not included in the final output. Interpolation
#methods will do weird things if there aren't many timepoints after scrubbing. All interpolation
#schemes besides spectral are essentially wrappers over scipy's 1d interpolation methods. 'spectral'
#interpolation is implemented based on code from <NAME>/<NAME>
#as shown in Power's 2014 NeuroImage paper
#noise_comps_dict and clean_comps_dict both have the same syntax. The values
#specified by both of these matrices will be used (along with constant and linear trend)
#to construct the denoising regression model for the input timeseries, but only the
#noise explained by the noise_comps_dict will be removed from the input timeseries (
#plus also the constant and linear trend). Unlike the scrub_criteria_dictionary, the
#data specifed here do not need to come from the confounds section of the parc_object,
#and because of this, if you want to include something found under parc_object.confounds,
#you will need to specify "confounds" in the name. An example of the dictionary can be seen below:
#
# clean_comps_dict = {'aroma_clean_ics' : False}
#
#
# noise_comps_dict = {'aroma_noise_ics' : 5,
# 'confounds.wmcsfgsr' : False
# 'confounds.twelve_motion_regs' : False
# }
#
#
#The dictionary key should specify an element to be included in the denoising process
#and the dictionary value should be False if you don't want to do a PCA reduction on
#the set of nuisance variables (this will be the case more often than not), alternatively
#if the key represents a grouping of confounds, then you can use the value to specify the
#number of principal components to kept from a reduction of the grouping. If hpf_before_regression
#is used, the filtering will happen after the PCA.
#
#
#
#high_pass, low_pass: Filters to be applied as the last step in processing.
#set as False if you don't want to use them, otherwise set equal to the
#cutoff frequency
#
#If any of the input parameters are set to True, they will be treated as if they were
#set to False, because True values wouldn't mean anything....
#
#
#
#################################################################################################
#################################################################################################
#################################################################################################
#################################################################################################
#################################################################################################
#################################################################################################
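    #Example (hypothetical) call, assuming `parc` is a parcellated timeseries
    #object constructed elsewhere with the confound fields referenced below:
    #
    #   cleaned, good_inds = flexible_orth_denoise_parc(
    #       parc,
    #       hpf_before_regression=0.01,
    #       scrub_criteria_dictionary={'std_dvars' : 1.2, 'framewise_displacement' : 0.5},
    #       interpolation_method='linear',
    #       noise_comps_dict={'aroma_noise_ics' : 5, 'confounds.twelve_motion_regs' : False},
    #       clean_comps_dict={'aroma_clean_ics' : False},
    #       high_pass=False,
    #       low_pass=0.1)
    #
    #The dictionary keys are illustrative and must exist on the parc object (or
    #its confounds) for the call to work.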
    #Create an array with 1s for timepoints to use, and 0s for scrubbed timepoints
good_timepoints = find_timepoints_to_scrub(parc_obj, scrub_criteria_dictionary)
#Load the arrays with the data for both the clean and noise components to be used in regression
clean_comps_pre_filter = load_comps_dict(parc_obj, clean_comps_dict)
noise_comps_pre_filter = load_comps_dict(parc_obj, noise_comps_dict)
#Apply an initial HPF to everything if necessary - this does not remove scrubbed timepoints,
    #but does skip the first n_skip_vols (which will be set to 0 and not used in subsequent steps)
if hpf_before_regression != False:
b, a = imaging_utils.construct_filter('highpass', [hpf_before_regression], parc_obj.TR, 6)
#start with the clean comps matrix
if type(clean_comps_pre_filter) != type(False):
clean_comps_post_filter = np.zeros(clean_comps_pre_filter.shape)
for clean_dim in range(clean_comps_pre_filter.shape[0]):
clean_comps_post_filter[clean_dim, parc_obj.n_skip_vols:] = imaging_utils.apply_filter(b, a, clean_comps_pre_filter[clean_dim, parc_obj.n_skip_vols:])
#this option for both clean/noise indicates there is no input matrix to filter
else:
clean_comps_post_filter = False
#Move to the noise comps matrix
if type(noise_comps_pre_filter) != type(False):
noise_comps_post_filter = np.zeros(noise_comps_pre_filter.shape)
for noise_dim in range(noise_comps_pre_filter.shape[0]):
noise_comps_post_filter[noise_dim, parc_obj.n_skip_vols:] = imaging_utils.apply_filter(b, a, noise_comps_pre_filter[noise_dim, parc_obj.n_skip_vols:])
else:
noise_comps_post_filter = False
#then filter the original time signal
filtered_time_series = np.zeros(parc_obj.time_series.shape)
for original_ts_dim in range(parc_obj.time_series.shape[0]):
filtered_time_series[original_ts_dim, parc_obj.n_skip_vols:] = imaging_utils.apply_filter(b, a, parc_obj.time_series[original_ts_dim, parc_obj.n_skip_vols:])
#If you don't want to apply the initial HPF, then
#just make a copy of the matrices of interest
else:
clean_comps_post_filter = clean_comps_pre_filter
noise_comps_post_filter = noise_comps_pre_filter
filtered_time_series = parc_obj.time_series
#Now create the nuisance regression model. Only do this step if
#the noise_comps_post_filter isn't false.
good_timepoint_inds = np.where(good_timepoints == True)[0]
bad_timepoint_inds = np.where(good_timepoints == False)[0]
if type(noise_comps_post_filter) == type(False):
regressed_time_signal = filtered_time_series
else:
#Weird thing where I need to swap dimensions here...(implemented correctly)
#First add constant/linear trend to the denoising model
constant = np.ones((1,filtered_time_series.shape[1]))
linear_trend = np.linspace(0,filtered_time_series.shape[1],num=filtered_time_series.shape[1])
linear_trend = np.reshape(linear_trend, (1,filtered_time_series.shape[1]))[0]
noise_comps_post_filter = np.vstack((constant, linear_trend, noise_comps_post_filter))
regressed_time_signal = np.zeros(filtered_time_series.shape).transpose()
filtered_time_series_T = | |
<reponame>the-utkarshjain/GUI-for-Fortran<filename>gui_func.py
"""
@name
`gui_func.py`
@description
`src file for GUI Main class, subclass of <GUIBase>`
@package
`GUI for Fortran/C++ Application`
@official_repository
`https://github.com/the-utkarshjain/GUI-for-Fortran`
@contributors
* <NAME>
* <NAME>
* <NAME>
* Navya
* <NAME>`
"""
from gui_base import GUIBase, PlotEncapsulator, GUI_exception
import threading
import time
import numpy as np
import matplotlib.pyplot as plt
import os
import hashlib
import random
import subprocess
from copy import deepcopy
import math
class GUIMain(GUIBase):
r'''
Primary class to be used for
implementation.
'''
def __init__(self, *args, **kwargs):
super(GUIMain, self).__init__(*args, **kwargs)
@classmethod
@GUI_exception
def _refresh_utility(cls, first_file_path: str, second_file_path: str, third_file_path: str, memory: dict) -> bool:
r'''
        Function to check whether any of the three input files has changed since
        the last call by comparing SHA-256 checksums against those cached in
        `memory`. Returns True if at least one file is new or updated, so the
        caller can decide whether to re-run the executable.
'''
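        # Hypothetical usage sketch: the caller keeps one dict alive across calls
        # so the cached checksums persist between refreshes.
        #
        #   memory = {}
        #   if GUIMain._refresh_utility('in1.txt', 'in2.txt', 'in3.txt', memory):
        #       pass  # e.g. re-run the Fortran executable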
isupdates = False
checksum_file_1 = hashlib.sha256(open(first_file_path, 'r').read().encode('utf-8')).hexdigest()
checksum_file_2 = hashlib.sha256(open(second_file_path, 'r').read().encode('utf-8')).hexdigest()
checksum_file_3 = hashlib.sha256(open(third_file_path, 'r').read().encode('utf-8')).hexdigest()
if memory.get(first_file_path, None) != checksum_file_1:
memory[first_file_path] = checksum_file_1
isupdates = True
if memory.get(second_file_path, None) != checksum_file_2:
memory[second_file_path] = checksum_file_2
isupdates = True
if memory.get(third_file_path, None) != checksum_file_3:
memory[third_file_path] = checksum_file_3
isupdates = True
if isupdates == False:
print("No updates found in the input file.")
        return isupdates
@classmethod
@GUI_exception
def _nonblocking_execute_external_code(cls, exe_file_path: str, thread_queue: list):
r'''
        Function to execute the Fortran
        executable and generate the output file.
'''
def target_func(x):
return subprocess.call([x])
t = threading.Thread(target=target_func, args=(exe_file_path,))
thread_queue.append(t)
t.start()
@PlotEncapsulator
@GUI_exception
def _plot_first_2D_data(self, output_file_path: str, time_file_path: str):
r'''
Function to plot experimental data
        with respect to the timestamps.
'''
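        # Layout of the input files, as implied by the parsing below (values are
        # illustrative):
        #
        #   time_file_path:   the first line holds an integer N; the times used
        #                     for plotting are the values after skipping the next
        #                     N lines.
        #   output_file_path: two whitespace-separated columns per line, observed
        #                     then simulated concentration, e.g. '0.123 0.118'.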
time = []
conc = []
with open(time_file_path,'r') as file:
data = file.read().splitlines()
for row in range(1,len(data)):
if row>int(data[0]):
time.append(float(data[row]))
with open(output_file_path,'r') as file:
data = file.read().splitlines()
for line in data:
x, y = line.split()
x = float(x)
conc.append(x)
plt.scatter(time,conc, marker='o', color='black')
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.title('Observed-BTC [ {} ]'.format(self.mode))
@PlotEncapsulator
@GUI_exception
def _plot_second_2D_data(self, output_file_path: str, time_file_path: str):
r'''
Function to plot simulation data
        with respect to the timestamps.
'''
time = []
conc = []
with open(time_file_path,'r') as file:
data = file.read().splitlines()
for row in range(1,len(data)):
if row>int(data[0]):
time.append(float(data[row]))
with open(output_file_path,'r') as file:
data = file.read().splitlines()
for line in data:
x, y = line.split()
y = float(y)
conc.append(y)
plt.plot(time,conc)
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.title('Concentration-Time graph [ {} ]'.format(self.mode))
@PlotEncapsulator
@GUI_exception
def _plot_both_2D_data(self, output_file_path: str, time_file_path: str):
r'''
Function to plot simulation and experimental data
        on the same canvas.
'''
time = []
conc1 = []
conc2=[]
with open(time_file_path,'r') as file:
data = file.read().splitlines()
for row in range(1,len(data)):
if row>int(data[0]):
time.append(float(data[row]))
with open(output_file_path,'r') as file:
data = file.read().splitlines()
for line in data:
x, y = line.split()
x, y = float(x), float(y)
conc1.append(x)
conc2.append(y)
plt.scatter(time,conc1, marker='o',label='Observed',color='black')
plt.plot(time,conc2, label='Simulated [ {} ]'.format(self.mode))
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.title('Simulated-BTC [ {} ]'.format(self.mode))
plt.legend()
@classmethod
@GUI_exception
def _inplace_update_variable_dictionary(cls, first_file_path: str, second_file_path: str, third_file_path: str, variable_dictionary: dict) -> None:
r'''
Function to update variable dictionary in place
using the manually entered values
'''
try:
file1 = open(first_file_path, "r")
variable_dictionary["Mesopore seepage velocity"] = file1.readline().strip()
variable_dictionary["Macropore seepage velocity"] = file1.readline().strip()
variable_dictionary["Solute mass transfer rate b/w meso-micropore"] = file1.readline().strip()
variable_dictionary["Solute mass transfer rate b/w meso-macropore"] = file1.readline().strip()
variable_dictionary["Dispersivity"] = file1.readline().strip()
variable_dictionary["No. of observation time steps"] = file1.readline().strip()
finally:
file1.close()
try:
file3 = open(third_file_path, "r")
line = file3.readline().split()
variable_dictionary["nz"] = line[0]
variable_dictionary["nm"] = line[1]
line = file3.readline().split()
variable_dictionary["Length"] = line[0]
variable_dictionary["Bulk density of porous media"] = line[1]
variable_dictionary["Run time"] = line[2]
variable_dictionary["Pulse time"] = line[3]
variable_dictionary["delta_t"] = line[4]
variable_dictionary["delta_x"] = line[5]
line = file3.readline().split()
variable_dictionary["Porosity of the macropore region"] = line[0]
variable_dictionary["Porosity of the mesopore region"] = line[1]
variable_dictionary["Porosity of the micropore region"] = line[2]
line = file3.readline().split()
variable_dictionary["Instantaneous sorption fraction in macropore region"] = line[0]
variable_dictionary["Instantaneous sorption fraction in mesopore region"] = line[1]
variable_dictionary["Instantaneous sorption fraction in micropore region"] = line[2]
variable_dictionary["Fraction of sorption site available for macropore region"] = line[3]
variable_dictionary["Fraction of sorption site available for mesopore region"] = line[4]
variable_dictionary["Fraction of sorption site available for immobile region"] = line[5]
line = file3.readline().split()
variable_dictionary["Equilibrium sorption coefficient in macropore region"] = line[0]
variable_dictionary["Equilibrium sorption coefficient in mesopore region"] = line[1]
variable_dictionary["Equilibrium sorption coefficient in micropore region"] = line[2]
variable_dictionary["Rate-limited sorbed coefficient in macropore region"] = line[3]
variable_dictionary["Rate-limited sorbed coefficient in mesopore region"] = line[4]
variable_dictionary["Rate-limited sorbed coefficient in micropore region"] = line[5]
finally:
file3.close()
@classmethod
@GUI_exception
def _write_updated_values(cls, first_file_path: str, second_file_path: str, third_file_path: str, variable_dictionary: dict) -> None:
r'''
Function to update values (manually entered by user)
in the files from the variable dictionary.
'''
try:
with open(first_file_path, "r") as f:
temp = f.readlines()
file1 = open(first_file_path, "w")
lines = ["Mesopore seepage velocity", "Macropore seepage velocity", "Solute mass transfer rate b/w meso-micropore", "Solute mass transfer rate b/w meso-macropore", "Dispersivity", "No. of observation time steps"]
for line in lines[:-1]:
file1.write(variable_dictionary[line]+str("\n"))
file1.write(str(int(float(variable_dictionary[lines[-1]])))+str("\n"))
file1.write("".join(temp[6:]))
finally:
file1.close()
try:
file3 = open(third_file_path, "w")
lines = ["nz", "nm"]
for line in lines[:-1]:
file3.write(str(int(float(variable_dictionary[line])))+str(" "))
file3.write(str(int(float(variable_dictionary[lines[-1]])))+str("\n"))
lines = ["Length", "Bulk density of porous media", "Run time", "Pulse time", "delta_t", "delta_x"]
for line in lines[:-1]:
file3.write(variable_dictionary[line]+str(" "))
file3.write(variable_dictionary[lines[-1]]+str("\n"))
lines = ["Porosity of the macropore region", "Porosity of the mesopore region", "Porosity of the micropore region"]
for line in lines[:-1]:
file3.write(variable_dictionary[line]+str(" "))
file3.write(variable_dictionary[lines[-1]]+str("\n"))
lines = ["Instantaneous sorption fraction in macropore region", "Instantaneous sorption fraction in mesopore region", "Instantaneous sorption fraction in micropore region", "Fraction of sorption site available for macropore region", "Fraction of sorption site available for mesopore region", "Fraction of sorption site available for immobile region"]
for line in lines[:-1]:
file3.write(variable_dictionary[line]+str(" "))
file3.write(variable_dictionary[lines[-1]]+str("\n"))
lines = ["Equilibrium sorption coefficient in macropore region", "Equilibrium sorption coefficient in mesopore region", "Equilibrium sorption coefficient in micropore region", "Rate-limited sorbed coefficient in macropore region", "Rate-limited sorbed coefficient in mesopore region", "Rate-limited sorbed coefficient in micropore region"]
for line in lines[:-1]:
file3.write(variable_dictionary[line]+str(" "))
file3.write(variable_dictionary[lines[-1]]+str("\n"))
finally:
file3.close()
@GUI_exception
def _export_timestamps_data(self, time_series: list, first_file_path: str, second_file_path: str, third_file_path: str) -> None:
r'''
Function to export timestamp data
to an external file.
'''
time_series_copy = map(lambda x: str(x), time_series)
as_string = "\n".join(time_series_copy)
with open(second_file_path, "w") as f:
f.write(as_string)
@GUI_exception
def _import_timestamps_data(self, first_file_path: str, second_file_path: str, third_file_path: str) -> list:
r'''
Function to import time-stamp data
manually.
'''
try:
file2 = open(second_file_path, "r")
skip = int(file2.readline())
for i in range(skip):
file2.readline()
timestamps_data = file2.read().splitlines()
finally:
file2.close()
time_series = [float(i) for i in timestamps_data]
return time_series
@GUI_exception
def _export_concentration_data(self, time_series: list, first_file_path: str, second_file_path: str, third_file_path: str) -> None:
r'''
Function to export concentration data
to an external file.
'''
time_series_copy = map(lambda x: str(x), time_series)
as_string = "\n".join(time_series_copy)
with open(first_file_path, "w") as f:
f.write(as_string)
@GUI_exception
def _import_concentration_data(self, first_file_path: str, second_file_path: str, third_file_path: str) -> list:
r'''
Function to import concentration data
manually.
'''
try:
file1 = open(first_file_path, "r")
for i in range(6):
file1.readline()
concentration_data = file1.read().splitlines()
finally:
file1.close()
        concentration_series = [float(i) for i in concentration_data]
        return concentration_series
@GUI_exception
def _initialize_variables(self):
r'''
Function to initialise variables in accordance with
the modes as suggested in first feedback.
'''
return {
"ADE": {
"nz": None,
"nm": None,
"Length": None,
"Bulk density of porous media": None,
"Run time": None,
"Pulse time": None,
"delta_t": None,
"delta_x": None,
"Porosity of the macropore region": None,
"Porosity of the mesopore region": 1e-16,
"Porosity of the micropore region": 1e-16,
"Instantaneous sorption fraction in macropore region": 1,
"Instantaneous sorption fraction in mesopore region": 1e-16,
"Instantaneous sorption fraction in micropore region": 1e-16,
"Fraction of sorption site available for macropore region": 1,
"Fraction of sorption site available for mesopore region": 1e-16,
"Fraction of sorption site available for immobile region": 1e-16,
"Equilibrium sorption coefficient in macropore region": None,
"Equilibrium sorption coefficient in mesopore region": 1e-16,
"Equilibrium sorption coefficient in micropore region": 1e-16,
"Rate-limited sorbed coefficient in macropore region": 1e-16,
"Rate-limited sorbed coefficient in mesopore region": 1e-16,
"Rate-limited sorbed coefficient in micropore region": 1e-16,
"Mesopore seepage velocity": 1e-16,
"Macropore seepage velocity": None,
"Solute mass transfer rate b/w meso-micropore": 1e-16,
"Solute mass transfer rate b/w meso-macropore": 1e-16,
"Dispersivity": None,
"No. of observation time steps": None,
},
"MIM": {
"nz": None,
"nm": None,
"Length": None,
"Bulk density of porous media": None,
"Run time": None,
"Pulse time": None,
"delta_t": None,
"delta_x": None,
"Porosity of the macropore region": None,
"Porosity of the | |
import pandas as pd
from . import register_class, EXP_NAMESPACE
from ..container import Table, Row, Container, AbstractContainer
from ..utils import docval, popargs
class KeyTable(Table):
"""
A table for storing keys used to reference external resources
"""
__defaultname__ = 'keys'
__columns__ = (
{'name': 'key', 'type': str,
'doc': 'The user key that maps to the resource term / registry symbol.'},
)
class Key(Row):
"""
A Row class for representing rows in the KeyTable
"""
__table__ = KeyTable
class ResourceTable(Table):
"""
A table for storing names of ontology sources and their uri
"""
__defaultname__ = 'resources'
__columns__ = (
{'name': 'resource', 'type': str,
'doc': 'The resource/registry that the term/symbol comes from.'},
{'name': 'resource_uri', 'type': str,
'doc': 'The URI for the resource term / registry symbol.'},
)
class Resource(Row):
"""
A Row class for representing rows in the ResourceTable
"""
__table__ = ResourceTable
class EntityTable(Table):
"""
A table for storing the external resources a key refers to
"""
__defaultname__ = 'entities'
__columns__ = (
{'name': 'keys_idx', 'type': (int, Key),
'doc': ('The index into the keys table for the user key that '
'maps to the resource term / registry symbol.')},
{'name': 'resources_idx', 'type': (int, Resource),
'doc': 'The index into the ResourceTable.'},
{'name': 'entity_id', 'type': str,
'doc': 'The unique ID for the resource term / registry symbol.'},
{'name': 'entity_uri', 'type': str,
'doc': 'The URI for the resource term / registry symbol.'},
)
class Entity(Row):
"""
A Row class for representing rows in the EntityTable
"""
__table__ = EntityTable
class ObjectTable(Table):
"""
A table for storing objects (i.e. Containers) that contain keys that refer to external resources
"""
__defaultname__ = 'objects'
__columns__ = (
{'name': 'object_id', 'type': str,
'doc': 'The object ID for the Container/Data'},
{'name': 'field', 'type': str,
'doc': 'The field on the Container/Data that uses an external resource reference key'},
)
class Object(Row):
"""
A Row class for representing rows in the ObjectTable
"""
__table__ = ObjectTable
class ObjectKeyTable(Table):
"""
A table for identifying which keys are used by which objects for referring to external resources
"""
__defaultname__ = 'object_keys'
__columns__ = (
{'name': 'objects_idx', 'type': (int, Object),
'doc': 'the index into the objects table for the object that uses the key'},
{'name': 'keys_idx', 'type': (int, Key),
'doc': 'the index into the key table that is used to make an external resource reference'}
)
class ObjectKey(Row):
"""
A Row class for representing rows in the ObjectKeyTable
"""
__table__ = ObjectKeyTable
@register_class('ExternalResources', EXP_NAMESPACE)
class ExternalResources(Container):
"""A table for mapping user terms (i.e. keys) to resource entities."""
__fields__ = (
{'name': 'keys', 'child': True},
{'name': 'resources', 'child': True},
{'name': 'objects', 'child': True},
{'name': 'object_keys', 'child': True},
{'name': 'entities', 'child': True},
)
@docval({'name': 'name', 'type': str, 'doc': 'the name of this ExternalResources container'},
{'name': 'keys', 'type': KeyTable, 'default': None,
'doc': 'the table storing user keys for referencing resources'},
{'name': 'resources', 'type': ResourceTable, 'default': None,
'doc': 'the table for storing names of resources and their uri'},
{'name': 'entities', 'type': EntityTable, 'default': None,
'doc': 'the table storing entity information'},
{'name': 'objects', 'type': ObjectTable, 'default': None,
'doc': 'the table storing object information'},
{'name': 'object_keys', 'type': ObjectKeyTable, 'default': None,
'doc': 'the table storing object-resource relationships'})
def __init__(self, **kwargs):
name = popargs('name', kwargs)
super().__init__(name)
self.keys = kwargs['keys'] or KeyTable()
self.resources = kwargs['resources'] or ResourceTable()
self.entities = kwargs['entities'] or EntityTable()
self.objects = kwargs['objects'] or ObjectTable()
self.object_keys = kwargs['object_keys'] or ObjectKeyTable()
@docval({'name': 'key_name', 'type': str,
'doc': 'the name of the key to be added'})
def _add_key(self, **kwargs):
"""
Add a key to be used for making references to external resources
It is possible to use the same *key_name* to refer to different resources so long as the *key_name* is not
used within the same object and field. To do so, this method must be called for the two different resources.
The returned Key objects must be managed by the caller so as to be appropriately passed to subsequent calls
to methods for storing information about the different resources.
"""
key = kwargs['key_name']
return Key(key, table=self.keys)
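    # Hedged usage sketch for the private helpers in this class (identifiers and
    # URIs are illustrative; the public API may wrap these calls differently):
    #
    #   er = ExternalResources(name='external resources')
    #   key = er._add_key('Homo sapiens')
    #   resource = er._add_resource(resource='NCBI Taxonomy',
    #                               uri='https://www.ncbi.nlm.nih.gov/taxonomy')
    #   er._add_entity(key, resource, 'NCBI:txid9606',
    #                  'https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=9606')
    #   obj = er._add_object(container, 'species')  # `container` is an AbstractContainer
    #   er._add_external_reference(obj, key)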
@docval({'name': 'key', 'type': (str, Key), 'doc': 'the key to associate the entity with'},
{'name': 'resources_idx', 'type': (int, Resource), 'doc': 'the id of the resource'},
{'name': 'entity_id', 'type': str, 'doc': 'unique entity id'},
{'name': 'entity_uri', 'type': str, 'doc': 'the URI for the entity'})
def _add_entity(self, **kwargs):
"""
Add an entity that will be referenced to using the given key
"""
key = kwargs['key']
resources_idx = kwargs['resources_idx']
entity_id = kwargs['entity_id']
entity_uri = kwargs['entity_uri']
if not isinstance(key, Key):
key = self._add_key(key)
resource_entity = Entity(key, resources_idx, entity_id, entity_uri, table=self.entities)
return resource_entity
@docval({'name': 'resource', 'type': str, 'doc': 'the name of the ontology resource'},
{'name': 'uri', 'type': str, 'doc': 'uri associated with ontology resource'})
def _add_resource(self, **kwargs):
"""
Add resource name and uri to ResourceTable that will be referenced by the ResourceTable idx.
"""
resource_name = kwargs['resource']
uri = kwargs['uri']
resource = Resource(resource_name, uri, table=self.resources)
return resource
@docval({'name': 'container', 'type': (str, AbstractContainer),
'doc': 'the Container/Data object to add or the object_id for the Container/Data object to add'},
{'name': 'field', 'type': str, 'doc': 'the field on the Container to add'})
def _add_object(self, **kwargs):
"""
Add an object that references an external resource
"""
container, field = popargs('container', 'field', kwargs)
if isinstance(container, AbstractContainer):
container = container.object_id
obj = Object(container, field, table=self.objects)
return obj
@docval({'name': 'obj', 'type': (int, Object), 'doc': 'the Object to that uses the Key'},
{'name': 'key', 'type': (int, Key), 'doc': 'the Key that the Object uses'})
def _add_external_reference(self, **kwargs):
"""
Specify that an object (i.e. container and field) uses a key to reference
an external resource
"""
obj, key = popargs('obj', 'key', kwargs)
return ObjectKey(obj, key, table=self.object_keys)
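    # Illustrative sketch (hypothetical names and ids, not from the original source) of how
    # the private helpers above are typically chained when registering an external reference:
    #   resource = er._add_resource(resource='NCBI_Taxonomy', uri='https://www.ncbi.nlm.nih.gov/taxonomy')
    #   key = er._add_key('Homo sapiens')
    #   entity = er._add_entity(key, resource, entity_id='NCBI:txid9606', entity_uri='https://www.ncbi.nlm.nih.gov/taxonomy/9606')
    #   obj = er._add_object(my_container, 'species')
    #   er._add_external_reference(obj, key)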
def _check_object_field(self, container, field):
"""
A helper function for checking if a container and field have been added.
        The container can be either an object_id string or an AbstractContainer.
If the container and field have not been added, add the pair and return
the corresponding Object. Otherwise, just return the Object.
"""
if isinstance(container, str):
objecttable_idx = self.objects.which(object_id=container)
else:
objecttable_idx = self.objects.which(object_id=container.object_id)
if len(objecttable_idx) > 0:
field_idx = self.objects.which(field=field)
objecttable_idx = list(set(objecttable_idx) & set(field_idx))
if len(objecttable_idx) == 1:
return self.objects.row[objecttable_idx[0]]
elif len(objecttable_idx) == 0:
return self._add_object(container, field)
else:
raise ValueError("Found multiple instances of the same object_id and field in object table")
@docval({'name': 'key_name', 'type': str, 'doc': 'the name of the key to get'},
{'name': 'container', 'type': (str, AbstractContainer), 'default': None,
'doc': ('the Container/Data object that uses the key or '
'the object_id for the Container/Data object that uses the key')},
{'name': 'field', 'type': str, 'doc': 'the field of the Container that uses the key', 'default': None})
def get_key(self, **kwargs):
"""
Return a Key or a list of Key objects that correspond to the given key.
If container and field are provided, the Key that corresponds to the given name of the key
for the given container and field is returned.
"""
key_name, container, field = popargs('key_name', 'container', 'field', kwargs)
key_idx_matches = self.keys.which(key=key_name)
if container is not None and field is not None:
# if same key is used multiple times, determine
# which instance based on the Container
object_field = self._check_object_field(container, field)
for row_idx in self.object_keys.which(objects_idx=object_field.idx):
key_idx = self.object_keys['keys_idx', row_idx]
if key_idx in key_idx_matches:
return self.keys.row[key_idx]
raise ValueError("No key with name '%s' for container '%s' and field '%s'" % (key_name, container, field))
else:
if len(key_idx_matches) == 0:
# the key has never been used before
raise ValueError("key '%s' does not exist" % key_name)
elif len(key_idx_matches) > 1:
return [self.keys.row[i] for i in key_idx_matches]
else:
return self.keys.row[key_idx_matches[0]]
@docval({'name': 'resource_name', 'type': str, 'default': None})
def get_resource(self, **kwargs):
"""
Retrieve resource object with the given resource_name.
"""
resource_table_idx = self.resources.which(resource=kwargs['resource_name'])
if len(resource_table_idx) == 0:
# Resource hasn't been created
msg = "No resource '%s' exists. Use _add_resource to create a new resource" % kwargs['resource_name']
raise ValueError(msg)
else:
return self.resources.row[resource_table_idx[0]]
@docval({'name': 'container', 'type': (str, AbstractContainer), 'default': None,
'doc': ('the Container/Data object that uses the key or '
'the object_id for the Container/Data object that uses the key')},
'''
Function:
    In-game (playing) interface
Author:
    Charles
WeChat Official Account:
    Charles的皮卡丘
'''
import json
import math
import random
import pygame
from ..sprites import Enemy
from ..sprites import Turret
from .....utils import QuitGame
from .pause import PauseInterface
from collections import namedtuple
'''In-game interface'''
class GamingInterface():
def __init__(self, cfg, resource_loader):
self.cfg = cfg
self.resource_loader = resource_loader
        # Size of the game map
        map_w = self.cfg.SCREENSIZE[0]
        map_h = 500
        # Button size and position
        button_w = 60
        button_h = 60
        button_y = 520
        # Gap between buttons
        gap = 20
        # Buttons sit in the toolbar; there is an info panel at each end of the toolbar
toolbar_w = gap * 7 + button_w * 6
info_w = (self.cfg.SCREENSIZE[0] - toolbar_w) // 2
info_h = self.cfg.SCREENSIZE[1] - map_h
toolbar_h = self.cfg.SCREENSIZE[1] - map_h
        # Interface layout
self.map_rect = pygame.Rect(0, 0, map_w, map_h)
self.map_surface = pygame.Surface((map_w, map_h))
self.leftinfo_rect = pygame.Rect(0, map_h, info_w, info_h)
self.rightinfo_rect = pygame.Rect(self.cfg.SCREENSIZE[0] - info_w, map_h, info_w, info_h)
self.toolbar_rect = pygame.Rect(info_w, map_h, toolbar_w, toolbar_h)
        # Grass
        self.grass = resource_loader.images['game']['grass']
        # Rock (used to pave the path)
        self.rock = resource_loader.images['game']['rock']
        # Dirt
        self.dirt = resource_loader.images['game']['dirt']
        # Water
        self.water = resource_loader.images['game']['water']
        # Bush
        self.bush = resource_loader.images['game']['bush']
        # Nexus
        self.nexus = resource_loader.images['game']['nexus']
        # Cave
        self.cave = resource_loader.images['game']['cave']
        # Size of a map element; all map element images in the asset library must have the same size
        self.element_size = int(self.grass.get_rect().width)
        # Fonts
        self.info_font = resource_loader.fonts['Calibri_s']
        self.button_font = resource_loader.fonts['Calibri_l']
        # Tiles where turrets can be placed
        self.placeable = {0: self.grass}
        # Map element dictionary (the numbers match the numbers in the .map file)
self.map_elements = {
0: self.grass,
1: self.rock,
2: self.dirt,
3: self.water,
4: self.bush,
5: self.nexus,
6: self.cave
}
        # Records the path cells of the map
        self.path_list = []
        # Current map; the map file is loaded into this dict
        self.current_map = dict()
        # Item currently carried by the mouse (i.e. the selected tool) -> [tool name, tool]
        self.mouse_carried = []
        # Turrets that have been built on the map
        self.built_turret_group = pygame.sprite.Group()
        # All enemies
        self.enemies_group = pygame.sprite.Group()
        # All arrows that have been shot
        self.arrows_group = pygame.sprite.Group()
        # Buttons for player actions
Button = namedtuple('Button', ['rect', 'text', 'onClick'])
self.buttons = [
Button(pygame.Rect((info_w + gap), button_y, button_w, button_h), 'T1', self.takeT1),
Button(pygame.Rect((info_w + gap * 2 + button_w), button_y, button_w, button_h), 'T2', self.takeT2),
Button(pygame.Rect((info_w + gap * 3 + button_w * 2), button_y, button_w, button_h), 'T3', self.takeT3),
Button(pygame.Rect((info_w + gap * 4 + button_w * 3), button_y, button_w, button_h), 'XXX', self.takeXXX),
Button(pygame.Rect((info_w + gap * 5 + button_w * 4), button_y, button_w, button_h), 'Pause', self.pauseGame),
Button(pygame.Rect((info_w + gap * 6 + button_w * 5), button_y, button_w, button_h), 'Quit', QuitGame)
]
    '''Start the game'''
def start(self, screen, map_path=None, difficulty_path=None):
        # Load the parameters for the chosen difficulty
with open(difficulty_path, 'r') as f:
difficulty_dict = json.load(f)
self.money = difficulty_dict.get('money')
self.health = difficulty_dict.get('health')
self.max_health = difficulty_dict.get('health')
difficulty_dict = difficulty_dict.get('enemy')
        # Spawn a wave of enemies every 60 s
        generate_enemies_event = pygame.constants.USEREVENT + 0
        pygame.time.set_timer(generate_enemies_event, 60000)
        # Flag for spawning a wave and the total number of waves spawned so far
        generate_enemies_flag = False
        num_generate_enemies = 0
        # Spawn one enemy every 0.5 s
        generate_enemy_event = pygame.constants.USEREVENT + 1
        pygame.time.set_timer(generate_enemy_event, 500)
        generate_enemy_flag = False
        # Avoid undefined variables
        enemy_range = None
        num_enemy = None
        # Whether the turrets are aimed manually
        manual_shot = False
        has_control = False
        # Main game loop
while True:
if self.health <= 0:
return
for event in pygame.event.get():
if event.type == pygame.QUIT:
QuitGame()
if event.type == pygame.MOUSEBUTTONUP:
                # --Left click: select an item
if event.button == 1:
                    # ----Mouse clicked on the map
if self.map_rect.collidepoint(event.pos):
if self.mouse_carried:
if self.mouse_carried[0] == 'turret':
self.buildTurret(event.pos)
elif self.mouse_carried[0] == 'XXX':
self.sellTurret(event.pos)
                    # ----Mouse clicked on the toolbar
elif self.toolbar_rect.collidepoint(event.pos):
for button in self.buttons:
if button.rect.collidepoint(event.pos):
if button.text == 'T1':
button.onClick()
elif button.text == 'T2':
button.onClick()
elif button.text == 'T3':
button.onClick()
elif button.text == 'XXX':
button.onClick()
elif button.text == 'Pause':
button.onClick(screen)
elif button.text == 'Quit':
button.onClick()
break
                # --Right click: drop the carried item
                if event.button == 3:
                    self.mouse_carried = []
                # --Middle click: aim the turrets manually for one volley; otherwise they shoot freely
if event.button == 2:
manual_shot = True
if event.type == generate_enemies_event:
generate_enemies_flag = True
if event.type == generate_enemy_event:
generate_enemy_flag = True
            # --Spawn enemies; they become stronger and more numerous as the number of spawned waves grows
if generate_enemies_flag:
generate_enemies_flag = False
num_generate_enemies += 1
idx = 0
for key, value in difficulty_dict.items():
idx += 1
if idx == len(difficulty_dict.keys()):
enemy_range = value['enemy_range']
num_enemy = value['num_enemy']
break
if num_generate_enemies <= int(key):
enemy_range = value['enemy_range']
num_enemy = value['num_enemy']
break
if generate_enemy_flag and num_enemy:
generate_enemy_flag = False
num_enemy -= 1
enemy = Enemy(random.choice(range(enemy_range)), self.cfg, self.resource_loader)
self.enemies_group.add(enemy)
            # --Shoot arrows
for turret in self.built_turret_group:
if not manual_shot:
position = turret.position[0] + self.element_size // 2, turret.position[1]
arrow = turret.shot(position)
else:
position = turret.position[0] + self.element_size // 2, turret.position[1]
mouse_pos = pygame.mouse.get_pos()
angle = math.atan((mouse_pos[1] - position[1]) / (mouse_pos[0] - position[0] + 1e-6))
arrow = turret.shot(position, angle)
has_control = True
if arrow:
self.arrows_group.add(arrow)
else:
has_control = False
if has_control:
has_control = False
manual_shot = False
            # --Move the arrows and do collision detection
for arrow in self.arrows_group:
arrow.move()
points = [(arrow.rect.left, arrow.rect.top), (arrow.rect.left, arrow.rect.bottom), (arrow.rect.right, arrow.rect.top), (arrow.rect.right, arrow.rect.bottom)]
if (not self.map_rect.collidepoint(points[0])) and (not self.map_rect.collidepoint(points[1])) and \
(not self.map_rect.collidepoint(points[2])) and (not self.map_rect.collidepoint(points[3])):
self.arrows_group.remove(arrow)
del arrow
continue
for enemy in self.enemies_group:
if pygame.sprite.collide_rect(arrow, enemy):
enemy.life_value -= arrow.attack_power
self.arrows_group.remove(arrow)
del arrow
break
self.draw(screen, map_path)
    '''Draw the scene onto the game screen'''
def draw(self, screen, map_path):
self.drawToolbar(screen)
self.loadMap(screen, map_path)
self.drawMouseCarried(screen)
self.drawBuiltTurret(screen)
self.drawEnemies(screen)
self.drawArrows(screen)
pygame.display.flip()
    '''Draw all arrows that have been shot'''
def drawArrows(self, screen):
for arrow in self.arrows_group:
screen.blit(arrow.image, arrow.rect)
    '''Draw the enemies'''
def drawEnemies(self, screen):
for enemy in self.enemies_group:
if enemy.life_value <= 0:
self.money += enemy.reward
self.enemies_group.remove(enemy)
del enemy
continue
res = enemy.move(self.element_size)
if res:
coord = self.find_next_path(enemy)
if coord:
enemy.reached_path.append(enemy.coord)
enemy.coord = coord
enemy.position = self.coord2pos(coord)
enemy.rect.left, enemy.rect.top = enemy.position
else:
self.health -= enemy.damage
self.enemies_group.remove(enemy)
del enemy
continue
            # Draw the health bar
green_len = max(0, enemy.life_value / enemy.max_life_value) * self.element_size
if green_len > 0:
pygame.draw.line(screen, (0, 255, 0), (enemy.position), (enemy.position[0] + green_len, enemy.position[1]), 1)
if green_len < self.element_size:
pygame.draw.line(screen, (255, 0, 0), (enemy.position[0] + green_len, enemy.position[1]), (enemy.position[0] + self.element_size, enemy.position[1]), 1)
screen.blit(enemy.image, enemy.rect)
    '''Draw the turrets that have been built'''
def drawBuiltTurret(self, screen):
for turret in self.built_turret_group:
screen.blit(turret.image, turret.rect)
    '''Draw the item carried by the mouse'''
def drawMouseCarried(self, screen):
if self.mouse_carried:
position = pygame.mouse.get_pos()
coord = self.pos2coord(position)
position = self.coord2pos(coord)
            # Only draw it while the cursor is inside the map
if self.map_rect.collidepoint(position):
if self.mouse_carried[0] == 'turret':
screen.blit(self.mouse_carried[1].image, position)
self.mouse_carried[1].coord = coord
self.mouse_carried[1].position = position
self.mouse_carried[1].rect.left, self.mouse_carried[1].rect.top = position
else:
screen.blit(self.mouse_carried[1], position)
    '''Draw the toolbar'''
    def drawToolbar(self, screen):
        # Info panels
        info_color = (120, 20, 50)
        # --Left panel
pygame.draw.rect(screen, info_color, self.leftinfo_rect)
left_title = self.info_font.render('Player info:', True, (255, 255, 255))
money_info = self.info_font.render('Money: ' + str(self.money), True, (255, 255, 255))
health_info = self.info_font.render('Health: ' + str(self.health), True, (255, 255, 255))
screen.blit(left_title, (self.leftinfo_rect.left + 5, self.leftinfo_rect.top + 5))
screen.blit(money_info, (self.leftinfo_rect.left + 5, self.leftinfo_rect.top + 35))
screen.blit(health_info, (self.leftinfo_rect.left + 5, self.leftinfo_rect.top + 55))
        # --Right panel
pygame.draw.rect(screen, info_color, self.rightinfo_rect)
right_title = self.info_font.render('Selected info:', True, (255, 255, 255))
screen.blit(right_title, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 5))
        # Middle section (toolbar)
pygame.draw.rect(screen, (127, 127, 127), self.toolbar_rect)
for button in self.buttons:
mouse_pos = pygame.mouse.get_pos()
if button.rect.collidepoint(mouse_pos):
self.showSelectedInfo(screen, button)
button_color = (0, 200, 0)
else:
button_color = (0, 100, 0)
pygame.draw.rect(screen, button_color, button.rect)
button_text = self.button_font.render(button.text, True, (255, 255, 255))
button_text_rect = button_text.get_rect()
button_text_rect.center = (button.rect.centerx, button.rect.centery)
screen.blit(button_text, button_text_rect)
    '''Show information about the button currently under the mouse'''
def showSelectedInfo(self, screen, button):
if button.text in ['T1', 'T2', 'T3']:
turret = Turret({'T1': 0, 'T2': 1, 'T3': 2}[button.text], self.cfg, self.resource_loader)
selected_info1 = self.info_font.render('Cost: ' + str(turret.price), True, (255, 255, 255))
selected_info2 = self.info_font.render('Damage: ' + str(turret.arrow.attack_power), True, (255, 255, 255))
selected_info3 = self.info_font.render('Affordable: ' + str(self.money >= turret.price), True, (255, 255, 255))
screen.blit(selected_info1, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 35))
screen.blit(selected_info2, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 55))
screen.blit(selected_info3, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 75))
elif button.text == 'XXX':
selected_info = self.info_font.render('Sell a turret', True, (255, 255, 255))
screen.blit(selected_info, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 35))
elif button.text == 'Pause':
selected_info = self.info_font.render('Pause game', True, (255, 255, 255))
screen.blit(selected_info, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 35))
elif button.text == 'Quit':
selected_info = self.info_font.render('Quit game', True, (255, 255, 255))
screen.blit(selected_info, (self.rightinfo_rect.left + 5, self.rightinfo_rect.top + 35))
    '''Sell a turret (at half price)'''
def sellTurret(self, position):
coord = self.pos2coord(position)
for turret in self.built_turret_group:
if coord == turret.coord:
self.built_turret_group.remove(turret)
self.money += int(turret.price * 0.5)
del turret
break
    '''Build a turret'''
def buildTurret(self, position):
turret = self.mouse_carried[1]
coord = self.pos2coord(position)
position = self.coord2pos(coord)
turret.position = position
turret.coord = coord
turret.rect.left, turret.rect.top = position
if self.money - turret.price >= 0:
if self.current_map.get(turret.coord) in self.placeable.keys():
self.money -= turret.price
self.built_turret_group.add(turret)
if self.mouse_carried[1].turret_type == 0:
self.mouse_carried = []
self.takeT1()
elif self.mouse_carried[1].turret_type == 1:
self.mouse_carried = []
self.takeT2()
elif self.mouse_carried[1].turret_type == 2:
self.mouse_carried = []
self.takeT3()
    '''Pick up turret 1'''
def takeT1(self):
T1 = Turret(0, self.cfg, self.resource_loader)
if self.money >= T1.price:
self.mouse_carried = ['turret', T1]
    '''Pick up turret 2'''
def takeT2(self):
T2 = Turret(1, self.cfg, self.resource_loader)
if self.money >= T2.price:
self.mouse_carried = ['turret', T2]
    '''Pick up turret 3'''
def takeT3(self):
T3 = Turret(2, self.cfg, self.resource_loader)
if self.money >= T3.price:
self.mouse_carried = ['turret', T3]
    '''Sell a turret'''
def takeXXX(self):
XXX = self.resource_loader.images['game']['x']
self.mouse_carried = ['XXX', XXX]
    '''Find the next path cell'''
    def find_next_path(self, enemy):
        x, y = enemy.coord
        # Priority: down, right, left, up
neighbours = [(x, y+1), (x+1, y), (x-1, y), (x, y-1)]
for neighbour in neighbours:
            # Skip cells the enemy has already visited (tracked in enemy.reached_path)
            if (neighbour in self.path_list) and (neighbour not in enemy.reached_path):
                return neighbour
import argparse
import numpy as np
import csv
import pandas as pd
import json
import scipy.sparse as sp
from sparsebm import (
SBM,
LBM,
ModelSelection,
generate_LBM_dataset,
generate_SBM_dataset,
)
from sparsebm.utils import reorder_rows, ARI
import logging
logger = logging.getLogger(__name__)
try:
import cupy
_DEFAULT_USE_GPU = True
except ImportError:
_DEFAULT_USE_GPU = False
def define_parsers():
main = argparse.ArgumentParser(prog="sparsebm")
subparsers = main.add_subparsers(
help="algorithm to use", dest="subparser_name"
)
sbm_parser = subparsers.add_parser(
"sbm", help="use the stochastic block model"
)
lbm_parser = subparsers.add_parser(
"lbm", help="use the latent block model"
)
ms_parser = subparsers.add_parser(
"modelselection", help="use the model selection with LBM or SBM"
)
input_grp = ms_parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
input_grp.add_argument(
"-t",
"--type",
help="model to use. Either 'lbm' or 'sbm'",
required=True,
)
input_grp = ms_parser.add_argument_group("optional arguments")
input_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
input_grp.add_argument(
"-gpu",
"--use_gpu",
help="specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
        type=str2bool,
)
input_grp.add_argument(
"-idgpu",
"--gpu_index",
help="specify the gpu index if needed.",
default=None,
        type=int,
)
input_grp.add_argument(
"-s",
"--symmetric",
help="specify if the adajacency matrix is symmetric. For sbm only",
default=False,
)
input_grp.add_argument(
"-p", "--plot", help="display model exploration plot", default=True
)
output_grp = ms_parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
generate_sbm_parser = subparsers.add_parser(
"generate", help="use sparsebm to generate a data matrix"
)
subparsers_generate = generate_sbm_parser.add_subparsers(
help="model to generate data with", dest="subparsers_generate_name"
)
sbm_generation_parser = subparsers_generate.add_parser(
"sbm", help="use the stochastic block model to generate data"
)
lbm_generation_parser = subparsers_generate.add_parser(
"lbm", help="use the latent block model to generate data"
)
help_example_base = """A json configuration file that specify the parameters
of the data to generate. If no file is given a random graph is generated."""
help_sbm_gen = """\n Example of json configuration file for SBM: \n{\n
"type": "sbm",\n "number_of_nodes": 1000,\n "number_of_clusters": 4,\n
"symmetric": true,\n "connection_probabilities": [\n [\n 0.1,\n
0.036,\n 0.012,\n 0.0614\n ],\n [\n 0.036,\n
0.074,\n 0,\n 0\n ],\n [\n 0.012,\n 0,\n
0.11,\n 0.024\n ],\n [\n 0.0614,\n 0,\n
0.024,\n 0.086\n ]\n ],\n "cluster_proportions": [\n 0.25
,\n 0.25,\n 0.25,\n 0.25\n ]\n}"""
sbm_generation_parser.add_argument(
"-f",
"--file",
default=None,
help=help_example_base + help_sbm_gen,
required=False,
)
lbm_generation_parser.add_argument(
"-f", "--file", default=None, help=help_example_base, required=False
)
for parser in [sbm_parser, lbm_parser]:
input_grp = parser.add_argument_group("mandatory arguments")
input_grp.add_argument(
"ADJACENCY_MATRIX", help="List of edges in CSV format"
)
if parser == lbm_parser:
input_grp.add_argument(
"-k1",
"--n_row_clusters",
help="number of row clusters",
default=4,
type=int,
required=True,
)
input_grp.add_argument(
"-k2",
"--n_column_clusters",
help="number of row clusters",
default=4,
type=int,
required=True,
)
if parser == sbm_parser:
input_grp.add_argument(
"-k",
"--n_clusters",
help="number of clusters",
default=4,
type=int,
required=True,
)
output_grp = parser.add_argument_group("output")
output_grp.add_argument(
"-o",
"--output",
help="File path for the json results.",
default="results.json",
)
param_grp = parser.add_argument_group("optional arguments")
param_grp.add_argument(
"-sep",
"--sep",
default=",",
help="CSV delimiter to use. Default is ',' ",
)
if parser == sbm_parser:
param_grp.add_argument(
"-s",
"--symmetric",
help="Specify if the adajacency matrix is symmetric",
default=False,
# type=bool,
)
param_grp.add_argument(
"-niter",
"--max_iter",
help="Maximum number of EM step",
default=10000,
type=int,
)
param_grp.add_argument(
"-ninit",
"--n_init",
help="Number of initializations that will be run",
default=100,
type=int,
)
param_grp.add_argument(
"-early",
"--n_iter_early_stop",
help="Number of EM steps to perform for each initialization.",
default=10,
type=int,
)
param_grp.add_argument(
"-ninitt",
"--n_init_total_run",
help="Number of the best initializations that will be run\
until convergence.",
default=2,
type=int,
)
param_grp.add_argument(
"-t",
"--tol",
help="Tolerance of likelihood to declare convergence.",
default=1e-4,
type=float,
)
param_grp.add_argument(
"-v",
"--verbosity",
help="Degree of verbosity. Scale from 0 (no message displayed)\
to 3.",
default=1,
type=int,
)
param_grp.add_argument(
"-gpu",
"--use_gpu",
help="Specify if a GPU should be used.",
default=_DEFAULT_USE_GPU,
            type=str2bool,
)
param_grp.add_argument(
"-idgpu",
"--gpu_index",
help="Specify the gpu index if needed.",
default=None,
            type=int,
)
return main
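# Minimal dispatch sketch (assumed entry point, not necessarily the original one): the parser
# built above is parsed into a dict and routed to the process_*/generate_* helpers below.
#   parser = define_parsers()
#   args = vars(parser.parse_args())
#   if args["subparser_name"] == "sbm":
#       process_sbm(args)
#   elif args["subparser_name"] == "lbm":
#       process_lbm(args)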
def graph_from_csv(file, type, sep=","):
try:
pda = pd.read_csv(file, sep=sep, header=None)
npa = pda[[0, 1]].to_numpy()
if type == "sbm":
node_i_from = np.unique(npa)
node_i_to = np.arange(node_i_from.size)
i_mapping = {
f: t for f, t in np.stack((node_i_from, node_i_to), 1)
}
rows = pda[0].map(i_mapping)
cols = pda[1].map(i_mapping)
graph = sp.coo_matrix(
(np.ones(npa.shape[0]), (rows, cols)),
shape=(node_i_from.size, node_i_from.size),
)
return graph, i_mapping, None
else:
node_i_from = np.unique(npa[:, 0])
node_i_to = np.arange(node_i_from.size)
i_mapping = {
f: t for f, t in np.stack((node_i_from, node_i_to), 1)
}
rows = pda[0].map(i_mapping)
node_j_from = np.unique(npa[:, 1])
node_j_to = np.arange(node_j_from.size)
j_mapping = {
f: t for f, t in np.stack((node_j_from, node_j_to), 1)
}
cols = pda[1].map(j_mapping)
graph = sp.coo_matrix(
(np.ones(npa.shape[0]), (rows, cols)),
shape=(node_i_from.size, node_j_from.size),
)
return graph, i_mapping, j_mapping
except Exception as e:
logger.error(e)
raise e
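# Expected input for graph_from_csv: a headerless CSV with one edge per row, e.g.
# (hypothetical content)
#   0,12
#   0,37
#   5,12
# Column 0 holds source node ids and column 1 target node ids; ids are remapped to
# contiguous indices and the edges are returned as a scipy.sparse.coo_matrix.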
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def process_sbm(args):
graph, row_from_to, _ = graph_from_csv(
args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
)
model = SBM(
max_iter=args["max_iter"],
n_clusters=args["n_clusters"],
n_init=args["n_init"],
n_iter_early_stop=args["n_iter_early_stop"],
n_init_total_run=args["n_init_total_run"],
verbosity=args["verbosity"],
atol=args["tol"],
use_gpu=args["use_gpu"],
gpu_index=args["gpu_index"],
)
symmetric = str2bool(args["symmetric"])
logger.info(
"Runing with symmetric adjacency matrix : {}".format(symmetric)
)
model.fit(graph, symmetric=symmetric)
if not model.trained_successfully:
logger.error("FAILED, model has not been trained successfully.")
return None
logger.info("Model has been trained successfully.")
logger.info(
"Value of the Integrated Completed Loglikelihood is {:.4f}".format(
model.get_ICL()
)
)
labels = model.labels
groups = [
np.argwhere(labels == q).flatten() for q in range(args["n_clusters"])
]
row_to_from = {v: k for k, v in row_from_to.items()}
groups = [pd.Series(g).map(row_to_from).tolist() for g in groups]
results = {
"ILC": model.get_ICL(),
"edge_probability_between_groups": model.pi_.tolist(),
"group_membership_probability": model.group_membership_probability.flatten().tolist(),
"node_ids_clustered": groups,
}
with open(args["output"], "w") as outfile:
json.dump(results, outfile)
logger.info("Results saved in {}".format(args["output"]))
def process_lbm(args):
graph, row_from_to, col_from_to = graph_from_csv(
args["ADJACENCY_MATRIX"], args["subparser_name"], sep=args["sep"]
)
model = LBM(
max_iter=args["max_iter"],
n_row_clusters=args["n_row_clusters"],
n_column_clusters=args["n_column_clusters"],
n_init=args["n_init"],
n_iter_early_stop=args["n_iter_early_stop"],
n_init_total_run=args["n_init_total_run"],
verbosity=args["verbosity"],
atol=args["tol"],
use_gpu=args["use_gpu"],
gpu_index=args["gpu_index"],
)
model.fit(graph)
if not model.trained_successfully:
logger.error("FAILED, model has not been trained successfully.")
return None
logger.info("Model has been trained successfully.")
logger.info(
"Value of the Integrated Completed Loglikelihood is {:.4f}".format(
model.get_ICL()
)
)
row_labels = model.row_labels
row_groups = [
np.argwhere(row_labels == q).flatten()
for q in range(args["n_row_clusters"])
]
row_to_from = {v: k for k, v in row_from_to.items()}
row_groups = [pd.Series(g).map(row_to_from).tolist() for g in row_groups]
col_labels = model.column_labels
col_groups = [
np.argwhere(col_labels == q).flatten()
for q in range(args["n_column_clusters"])
]
col_to_from = {v: k for k, v in col_from_to.items()}
col_groups = [pd.Series(g).map(col_to_from).tolist() for g in col_groups]
results = {
"ILC": model.get_ICL(),
"edge_probability_between_groups": model.pi_.tolist(),
"row_group_membership_probability": model.row_group_membership_probability.flatten().tolist(),
"column_group_membership_probability": model.column_group_membership_probability.flatten().tolist(),
"node_type_1_ids_clustered": row_groups,
"node_type_2_ids_clustered": col_groups,
}
with open(args["output"], "w") as outfile:
json.dump(results, outfile)
logger.info("Results saved in {}".format(args["output"]))
def generate_sbm(args):
if "JSON_FILE" in args:
with open(args["JSON_FILE"]) as f:
conf = json.load(f)
else:
conf = {}
number_of_nodes = (
conf["number_of_nodes"] if "number_of_nodes" in conf else None
)
number_of_clusters = (
conf["number_of_clusters"] if "number_of_clusters" in conf else None
)
connection_probabilities = (
np.array(conf["connection_probabilities"])
if "connection_probabilities" in conf
else None
)
cluster_proportions = (
np.array(conf["cluster_proportions"])
if "cluster_proportions" in conf
else None
)
symmetric = conf["symmetric"] if "symmetric" in conf else False
dataset = generate_SBM_dataset(
number_of_nodes,
number_of_clusters,
connection_probabilities,
cluster_proportions,
symmetric=symmetric,
)
graph = dataset["data"]
graph = np.stack((graph.row, graph.col), 1)
cluster_indicator = dataset["cluster_indicator"]
labels = cluster_indicator.argmax(1)
number_of_clusters = cluster_indicator.shape[1]
groups = [
np.argwhere(labels == q).flatten().tolist()
for q in range(number_of_clusters)
]
results = {
"node_ids_grouped": groups,
"number_of_nodes": number_of_nodes,
"number_of_clusters": number_of_clusters,
"connection_probabilities": connection_probabilities.flatten().tolist()
if connection_probabilities
else None,
"cluster_proportions": cluster_proportions.tolist()
if cluster_proportions
else None,
}
file_groups = "./groups.json"
file_edges = "./edges.csv"
with open(file_groups, "w") as outfile:
json.dump(results, outfile)
logger.info("\n Groups and params saved in {}".format(file_groups))
np.savetxt(file_edges, graph, delimiter=",")
logger.info("Edges saved in {}".format(file_edges))
def generate_lbm(args):
if "JSON_FILE" in args:
with open(args["JSON_FILE"]) as f:
conf = json.load(f)
else:
conf = {}
number_of_rows = (
conf["number_of_rows"] if "number_of_rows" in conf else None
)
number_of_columns = (
conf["number_of_columns"] if "number_of_columns" in conf else None
)
nb_row_clusters = (
conf["nb_row_clusters"] if "nb_row_clusters" in conf else None
)
nb_column_clusters = (
conf["nb_column_clusters"] if "nb_column_clusters" in conf else None
)
connection_probabilities = (
np.array(conf["connection_probabilities"])
if "connection_probabilities" in conf
else None
)
row_cluster_proportions = (
np.array(conf["row_cluster_proportions"])
if "row_cluster_proportions" in conf
else None
)
column_cluster_proportions = (
np.array(conf["column_cluster_proportions"])
if "column_cluster_proportions" in conf
else None
)
dataset = generate_LBM_dataset(
number_of_rows,
number_of_columns,
nb_row_clusters,
nb_column_clusters,
connection_probabilities,
row_cluster_proportions,
column_cluster_proportions,
)
graph = dataset["data"]
number_of_rows, number_of_columns = graph.shape
graph = np.stack((graph.row, graph.col), 1)
row_cluster_indicator = dataset["row_cluster_indicator"]
column_cluster_indicator = dataset["column_cluster_indicator"]
row_labels = row_cluster_indicator.argmax(1)
col_labels = column_cluster_indicator.argmax(1)
nb_row_clusters = row_cluster_indicator.shape[1]
nb_column_clusters = column_cluster_indicator.shape[1]
row_groups = [
np.argwhere(row_labels == q).flatten().tolist()
for q in range(nb_row_clusters)
]
col_groups = [
np.argwhere(col_labels == q).flatten().tolist()
for q in range(nb_column_clusters)
]
results = {
"row_ids_grouped": row_groups,
"column_ids_grouped": col_groups,
def get_all_dots_in_overlapping_regions(counts_df, chunk_coords, stitching_selected='microscope_stitched'):
    # Select the dots whose stitched coordinates fall inside the overlapping chunk;
    # chunk_coords = [r_tl, r_br, c_tl, c_br]
    r_tag = 'r_px_' + stitching_selected
    c_tag = 'c_px_' + stitching_selected
    r_tl, r_br, c_tl, c_br = chunk_coords
    overlapping_ref_df = counts_df.loc[(counts_df[r_tag] > r_tl) & (counts_df[r_tag] < r_br)
        & (counts_df[c_tag] > c_tl) & (counts_df[c_tag] < c_br),:]
    return overlapping_ref_df
# TODO adjust the registration with dots (triangulation)
def register_cpl(cpl, chunk_coords, experiment_fpath,
stitching_channel,
reference_round):
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
try:
counts1_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[0]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[0]}')
else:
try:
counts2_fpath = list((experiment_fpath / 'results').glob('*decoded_fov_' + str(cpl[1]) + '.parquet'))[0]
except:
logger.error(f'count file missing for fov {cpl[1]}')
else:
counts1_df = pd.read_parquet(counts1_fpath)
counts2_df = pd.read_parquet(counts2_fpath)
count1_grp = counts1_df.loc[(counts1_df.channel == stitching_channel) &
(counts1_df.round_num == reference_round),:]
count2_grp = counts2_df.loc[(counts2_df.channel == stitching_channel) &
(counts2_df.round_num == reference_round),:]
count1_grp = counts1_df.loc[counts1_df.channel == stitching_channel,:]
count2_grp = counts2_df.loc[counts2_df.channel == stitching_channel,:]
overlap_count1 = get_all_dots_in_overlapping_regions(count1_grp, chunk_coords,stitching_selected='microscope_stitched')
overlap_count2 = get_all_dots_in_overlapping_regions(count2_grp, chunk_coords,stitching_selected='microscope_stitched')
if overlap_count1.empty or overlap_count2.empty:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
                # TODO
                # Maybe add a selection step: run bead-based registration when the number of beads
                # is below X, or fft-based registration when the number of beads is high enough
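                # The dots that fall inside the overlap are rendered into two synthetic
                # images of the overlap size and the tile-to-tile shift is estimated by
                # phase cross-correlation (register_translation) between those images.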
r_tl = chunk_coords[0]
c_tl = chunk_coords[2]
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int') + 1
norm_ref_coords = overlap_count1.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
norm_comp_coords = overlap_count2.loc[:,['r_px_microscope_stitched','c_px_microscope_stitched']].to_numpy() -[r_tl, c_tl]
img_ref = create_fake_image(img_shape, norm_ref_coords)
img_tran = create_fake_image(img_shape, norm_comp_coords)
shift, error, diffphase = register_translation(img_ref, img_tran)
registration[cpl] = [shift, error]
return registration
def register_cpl_fresh_nuclei(cpl: Tuple, chunk_coords: np.ndarray, order: dict,
metadata:dict, experiment_fpath:str):
"""Function to register orverlapping regions of nuclear staining for stitching
Args:
cpl (Tuple): overlapping tiles
chunk_coords (np.ndarray): coords of the overlapping region [r_tl,r_br,c_tl,c_br]
order (dict): description of the position of the tiles
metadata (dict): dictionary with the general experiment data
experiment_fpath (str): path to the experiment to process
Returns:
dict: registration output [shift, error]
"""
logger = selected_logger()
registration = {}
experiment_fpath = Path(experiment_fpath)
img_width = metadata['img_width']
img_height = metadata['img_height']
experiment_name = metadata['experiment_name']
error = 0
filtered_nuclei_fpath = experiment_fpath / 'fresh_tissue' / 'fresh_tissue_nuclei_preprocessed_img_data.zarr'
try:
st = zarr.DirectoryStore(filtered_nuclei_fpath)
root = zarr.group(store=st, overwrite=False)
except:
logger.error(f'cannot load the zarr files with filtered nuclei')
else:
try:
img1 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[0])]['preprocessed_data_fov_'+str(cpl[0])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[0]}')
else:
try:
img2 = root[experiment_name + '_fresh_tissue_nuclei_fov_' + str(cpl[1])]['preprocessed_data_fov_'+str(cpl[1])][...]
except:
logger.error(f'image file cannot be loaded for nuclei of fov {cpl[1]}')
else:
img_shape = np.array([np.abs(chunk_coords[1]-chunk_coords[0]),np.abs(chunk_coords[3]-chunk_coords[2])]).astype('int')
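                # `order` encodes the relative position of the two tiles, so the matching
                # overlap strip is sliced from the appropriate corner of each nuclei image
                # before estimating the shift.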
if order == {'row_order': ('top', 'bottom'), 'column_order': ('right', 'left')}:
img1_slice = img1[(img_height-img_shape[0]):img_height,0:img_shape[1]]
img2_slice = img2[0:img_shape[0],(img_width-img_shape[1]):img_width]
elif order == {'row_order': ('top', 'bottom'), 'column_order': ('left', 'right')}:
img1_slice = img1[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
img2_slice = img2[0:img_shape[0],0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('left', 'right')}:
img1_slice = img1[0:img_shape[0],img_width-img_shape[1]:img_width]
img2_slice = img2[img_height-img_shape[0]:img_height,0:img_shape[1]]
elif order == {'row_order': ('bottom', 'top'), 'column_order': ('right', 'left')}:
img1_slice = img1[0:img_shape[0],0:img_shape[1]]
img2_slice = img2[img_height-img_shape[0]:img_height,img_width-img_shape[1]:img_width]
else:
logger.error(f'unknown fovs order')
error = 1
if error:
shift = np.array([1000,1000])
registration[cpl] = [shift, np.nan]
else:
shift, error, diffphase = register_translation(img1_slice, img2_slice)
registration[cpl] = [shift, error]
return registration
def stitching_graph(experiment_fpath, stitching_channel,tiles_org, metadata,
reference_round, client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl,cpl, chunk_coords, experiment_fpath,stitching_channel,
reference_round)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
# Run registration only if there are not too many overlappig regions without
# dots
# counts_cpls_missing_overlapping_dots = 0
# cpls_missing_overlapping_dots = []
# for cpl, registration_output in all_registrations_dict.items():
# if np.isnan(registration_output[1]):
# cpls_missing_overlapping_dots.append(cpl)
# counts_cpls_missing_overlapping_dots += 1
# global_stitching_done = 0
# if len(cpls_missing_overlapping_dots) > 10:
# logger.error(f"Too many cpl of fovs without overlapping reference dots")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_no_global_stitching.pkl','rb'))
# global_stitching_done = 0
# return tiles_org.tile_corners_coords_pxl, global_stitching_done
# else:
# global_stitching_done = 1
# logger.error(f"The number of cpls of fovs without overlapping reference dots is low, test global stitching")
# pickle.dump([cpls_missing_overlapping_dots,counts_cpls_missing_overlapping_dots ],
# open(experiment_fpath / 'results' / 'fovs_without_overlapping_reference_dots_yes_global_stitching.pkl','wb'))
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
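    # Each overlapping pair (a, b) contributes one equation per dimension of the form
    # t_b - t_a ~= measured shift, so solving ZQ @ t = P in the least-squares sense gives
    # a global translation per tile, which is then re-anchored to tile 0 below.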
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'results'/ 'stitching_global_shift.pkl','wb'))
pickle.dump(adjusted_coords,open(experiment_fpath / 'results'/ 'global_stitched_coords.pkl','wb'))
return adjusted_coords
# return adjusted_coords, global_stitching_done
def stitching_graph_fresh_nuclei(experiment_fpath,tiles_org, metadata,
client, nr_dim = 2):
logger = selected_logger()
unfolded_overlapping_regions_dict = {key:value for (k,v) in tiles_org.overlapping_regions.items() for (key,value) in v.items()}
unfolded_overlapping_order_dict = {key:value for (k,v) in tiles_org.overlapping_order.items() for (key,value) in v.items()}
futures = []
for cpl, chunk_coords in unfolded_overlapping_regions_dict.items():
future = client.submit(register_cpl_fresh_nuclei,cpl, chunk_coords,
unfolded_overlapping_order_dict[cpl],
metadata,
experiment_fpath)
futures.append(future)
all_registrations = client.gather(futures)
all_registrations = [reg for reg in all_registrations if reg ]
all_registrations_dict = {}
for output_dict in all_registrations:
all_registrations_dict.update(output_dict)
overlapping_coords_reorganized = {}
for idx, cpl_dict in tiles_org.overlapping_regions.items():
overlapping_coords_reorganized.update(cpl_dict)
all_registrations_removed_large_shift = {k:v for (k,v) in all_registrations_dict.items() if np.all(np.abs(v[0]) < 20)}
cpls = all_registrations_removed_large_shift.keys()
# cpls = list(unfolded_overlapping_regions_dict.keys())
total_cpls = len(cpls)
nr_tiles = tiles_org.tile_corners_coords_pxl.shape[0]
weights_err1 = np.zeros((total_cpls * nr_dim))
weights_err2 = np.zeros((total_cpls * nr_dim))
P = np.zeros(total_cpls * nr_dim)
ZQ = np.zeros((total_cpls * nr_dim,nr_tiles * nr_dim))
weights_err = np.zeros((total_cpls * nr_dim))
for i, (a, b) in enumerate(cpls):
shift = all_registrations_removed_large_shift[(a,b)][0]
dr = shift[0]
dc = shift[1]
P[i * nr_dim] = dr
P[i * nr_dim +1 ] = dc
weights_err[i * nr_dim:i * nr_dim + nr_dim] = all_registrations_removed_large_shift[(a,b)][1]
for i, (a, b) in enumerate(cpls):
# Y row:
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a:nr_dim * a + 1] = -1
Z[nr_dim * b:nr_dim * b + 1] = 1
ZQ[i * nr_dim, :] = Z
# X row
Z = np.zeros((nr_tiles * nr_dim))
Z[nr_dim * a + 1:nr_dim * a + 2] = -1
Z[nr_dim * b + 1:nr_dim * b + 2] = 1
ZQ[i * nr_dim + 1, :] = Z
lrg = linmod.LinearRegression(fit_intercept=False)
lrg.fit(ZQ,P)
global_translrg = lrg.coef_.reshape(nr_tiles, nr_dim)
gb = -1 * (-lrg.coef_.reshape((nr_tiles, nr_dim)) \
+ lrg.coef_.reshape((nr_tiles, nr_dim))[0:1, :])
global_shift = gb.astype(int)
adjusted_coords = tiles_org.tile_corners_coords_pxl + global_shift
# Determine shift of missing tiles
out_level = 1000
low = np.where(global_shift< -out_level)[0]
high = np.where(global_shift> out_level)[0]
low_high = np.hstack((low,high))
missing_tiles_id = np.unique(low_high)
missing_tiles_coords = tiles_org.tile_corners_coords_pxl[missing_tiles_id,:]
if missing_tiles_coords.shape[0] >0:
coords_cl = np.delete(tiles_org.tile_corners_coords_pxl, missing_tiles_id, 0)
ad_coords_cl = np.delete(adjusted_coords, missing_tiles_id, 0 )
tst = linmod.LinearRegression(fit_intercept=False)
tst.fit(coords_cl,ad_coords_cl)
corrected_missing = tst.predict(missing_tiles_coords)
for idx, tile_id in enumerate(missing_tiles_id):
adjusted_coords[tile_id] = corrected_missing[idx]
dec_fpath = (experiment_fpath / 'fresh_tissue'/ 'results').glob('*_decoded_fov*')
for fpath in dec_fpath:
global_stitched_decoded_df = stitch_using_coords_general(fpath,
adjusted_coords,
tiles_org.reference_corner_fov_position,
metadata,
'global_stitched_nuclei')
if isinstance(global_stitched_decoded_df,pd.DataFrame):
global_stitched_decoded_df.to_parquet(fpath)
global_shift = tiles_org.tile_corners_coords_pxl - adjusted_coords
pickle.dump(global_shift,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'stitching_global_shift.pkl','wb'))
    pickle.dump(adjusted_coords,open(experiment_fpath / 'fresh_tissue' / 'results'/ 'global_stitched_coords.pkl','wb'))
    return adjusted_coords
'''
units.py
Module to work with S.I. units
History:
3/04/2018 : First version
4/04/2018 : Addition of non S.I. units and Physics constants
5/04/2018 : Modifications to work with numpy arrays
Connection with sympy
6/04/2018 : Connection with calc
9/04/2018 : Units as text in make* functions
10/04/2018 : Corrections in unitless and Hz
11/04/2018 : Addition of setSciForPrint
13/06/2018 : Addition of makeRange function
6/09/2018 : Added methods: __getitem__ , strip , __len__
'''
# Python 2.7 compatibility
from __future__ import print_function
from __future__ import division
version = '6/09/2018'
# Basic imports
import numpy as np
import inspect
# Try to load sympy
try:
import sympy
except:
sympy_imported = False
else:
sympy_imported = True
# Try to load calc
try:
import calc
except:
calc_imported = False
else:
calc_imported = True
# Try to load mmVars
try:
import mmVars as mm
except:
mmVars_imported = False
else:
mmVars_imported = True
# Exception code ######################################################
class unitsEx(Exception):
def __init__(self, msg=""):
self.msg = msg
print("\n** Units Exception")
print('** ' + msg)
print("\n")
# Globals ############################################################
# Number of base units
nbase = 7
# Vector with no units
v_none = np.array([0,0,0,0,0,0,0])
# Base unit names
v_names = ['m','kg','s','A','K','mol','cd']
# Indicate if we use sci for __str__
sci4str = False
# Number presentation code ###########################################
# This is the same code included in calc.py
def f2s(v,nd=None):
"""
f2s (float2string)
Takes one float value and converts it to string
If greater or equal than 1000, uses two decimal places
If greater than one, uses three decimal places
if less than one, uses three significant decimal places
The optional parameter can fix the number of significant digits
"""
# Base number of decimals
a = abs(v)
    if nd is None:
ndec = 3
if (a>=1000): ndec = 2
if (a<1):
if a!=0: ndec = int(np.floor(3-np.log10(a)))
else:
ndec = nd
if (a<1):
if a!=0: ndec = int(np.floor(nd-np.log10(a)))
# Check for significance
v2 = np.floor(v*10**ndec)/(10**ndec)
for i in range(0,ndec):
if np.floor(v2) == v2:
ndec = i+1
break
v2 = v2*10.0
# Return string
return ('{0:.%df}' % ndec).format(v)
def f2sci(v,unit='',nd=3,prefix=True,sep=''):
"""
Takes one float and converts it to scientific notation
Required parameters
v : Number to convert
Optional parameters
unit : Unit to show
nd : Number of decimal places (Default to 3)
prefix : Use standard prefixes for powers of 10 up to +/-18
sep : Separator between prefix and unit (Defaults to none)
"""
potH=['k','M','G','T','P','E']
potL=['m','u','n','p','f','a']
a = abs(v)
ndec = int(np.floor(np.log10(a)))
pot = int(np.floor(ndec/3))
exp = 3*pot
base = v/(10.0**exp)
s = f2s(base,nd)
if pot==0:
return s + ' ' + unit
if (prefix):
if 1 <= pot <=6:
s = s + ' ' + potH[pot-1] + sep + unit
return s
if 1 <= -pot <=6:
s = s + ' '+ potL[-pot-1] + sep + unit
return s
s = s + 'E' + ('{:+d}').format(exp) + ' ' + unit
return s
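# Rough usage sketch for f2sci (outputs shown approximately):
#   f2sci(4700.0, 'ohm')   -> '4.70 kohm'
#   f2sci(0.000123, 'V')   -> '123.0 uV'
#   f2sci(1.0e21, 'Hz')    -> '1.0E+21 Hz'   (outside the prefix range)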
def _unitName(unit):
"""
Special internal function for printVar
"""
if isinstance(unit,uVar):
return unit.name
return unit
def _usePrefix(unit,prefix):
"""
Special internal function for printVar
"""
if isinstance(unit,uVar):
if unit.complex:
return False
else:
return prefix
return prefix
def _printUnit(unit):
"""
Special internal function for printVar
"""
if unit == '':
print()
return
print(' '+_unitName(unit))
def printVar(name,value=None,unit='',sci=True,prefix=True,sep='',level=0):
"""
Print a variable name, value and units
Required parameters:
name : Name of the variable
Optional parameters:
    value : value to show (Defaults to being obtained from globals)
unit : units to use (Defaults to self ones)
sci : use sci numeric format (Defaults to True)
prefix : use powers of 10 prefixes (Defaults to True)
sep : separator between prefix and units (Defaults to none)
Internal use parameters:
level : stack level of caller to inspect globals
"""
# Try to evaluate variable if not given
if value is None:
value = _getVar(name,level=level+1)
# Code if value is not an uVar object
if not isinstance(value,uVar):
# Special case for numpy arrays
if isinstance(value,np.ndarray):
print(name + ' = ' + str(value),end='')
_printUnit(unit)
if sci:
prefix = _usePrefix(unit,prefix)
print(name + " = " + f2sci(value,_unitName(unit),prefix=prefix,sep=sep))
else:
print(name + " = " + f2s(value) + " " + _unitName(unit))
return
# Code if value is a uVar object
if unit != "" and not isinstance(unit,uVar):
raise unitsEx('If value is an uVar object, unit shall be of the same kind')
if unit != "":
value=value.convert(unit)
if isinstance(value.value,np.ndarray):
print(name + " = " + str(value))
return
if sci:
print(name + " = " + value.sci(prefix=prefix,sep=sep))
else:
print(name + " = " + str(value))
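# Rough usage sketch for printVar (hypothetical variable):
#   R = 4700.0
#   printVar('R', R, 'ohm')              # prints something like: R = 4.70 kohm
#   printVar('R', R, 'ohm', sci=False)   # prints something like: R = 4700.0 ohm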
def printUnit(name,unit='',sci=True,prefix=True,sep=''):
"""
Special call to printVar without value
"""
printVar(name,value=None,unit=unit,sci=sci,prefix=prefix,sep=sep,level=1)
def setSciForPrint(flag=True):
global sci4str
sci4str = flag
# uVar class #########################################################
class uVar:
# Constructor -----------------------------------------------------
def __init__(self,name,vector,value=1.0,scale=1.0):
"""
uVar constructor
uVar(name,vector,value=1.0)
Creates a new uVar object
Parameters:
name : Name of the units
vector : Vector of base units exponents
value : Numeric value (defaults to 1.0)
scale : Scaling factor from S.I. units
Returns a new uVar object
"""
if len(vector)!=7:
raise unitsEx('Constructor vector size must be seven')
self.vector = np.array(vector) # Base unit powers
self.value = value # Numerical value
self.name = name # Name of the units
self.complex = False # No single name for units
self.scale = scale # Scaling factor for this unit
# Copy object
def copy(self):
"""
Returns a copy of the current object
"""
res = uVar(self.name,self.vector,self.value,self.scale)
res.complex = self.complex
return res
# Get internal elements -----------------------------------------------------
def get_value(self):
"""
Returns the numeric value of the object
"""
return self.value
def get_vector(self):
"""
Return the vector of base units exponents
"""
return self.vector
def get_name(self):
"""
Return the name of the object units
"""
return self.name
# Set internal elements -----------------------------------------------------
def set_name(self,name,complex=False):
"""
Set the name of the object units
If complex is false (default) power of 10 prefixed could be used
"""
self.name=name
self.complex = complex
def set_value(self,value):
"""
Returns the numeric value of the object
"""
self.value = value
# Especial constructors ----------------------------------------------------
def operateValue(self,func):
"""
Execute a function on the value of the uVar object
Returns a new object
"""
new = self.copy()
new.value = func(new.value)
return new
def newValue(self,value):
"""
Sets a new value
Returns a new object
"""
new = self.copy()
new.value = value
return new
def newUnit(self):
"""
Return the units of a uVar object as a new object
"""
new = self.copy()
new.value = 1
return new
# Private methods -----------------------------------------------------------
def _construct_name_old(self):
"""
Private function
Return a new complex name from the base units exponents
Old version to be eliminated
"""
first = True
name = ''
for i in range(0,nbase):
if self.vector[i] != 0:
if not first:
name = name + '*'
first = False
name = name + v_names[i]
if self.vector[i] != 1:
name = name + '^' + str(self.vector[i])
return name
def _reconstruct(self,defaults=True):
"""
Private function
Reconstruct the own name from base units
If defaults is True, will use defaults if available
"""
# Remove the scaling
self.value = self.value * self.scale
self.scale = 1.0
# Check if it targets a default unit
if defaults:
tup = tuple(self.vector)
if tup in register:
unit = register[tup]
self.value = self.value/unit.scale
self.name = unit.name
self.complex = unit.complex
return
# Not complex by default
self.complex = False
# It is not a default unit
name =''
num = False
den = False
# Check if there is numerator and denominator
for i in range(0,nbase):
if self.vector[i]>0: num = True
if self.vector[i]<0: den = True
# If there is no numerator
if not num:
# No numerator and no denominator
if not den:
self.name = ''
self.complex = True | |
gauss_sigma, x)
lorentz_peak = cls._f_lorentz(offset, amplitude, lorentz_hwhm, x)
peak = frac_gauss*gauss_peak + (1-frac_gauss)*lorentz_peak
return peak
@classmethod
def _f_makep(cls, data, peaks, frac_gauss=None):
"""
Make a set of initial peak parameters for deconvolution.
:arg data: data to be fitted
:arg peaks: selected peak positions (see peakpicker())
:returns: an array of peaks, each consisting of the following parameters:
spectral offset (x)
gauss: 2*sigma**2
lorentz: scale (HWHM)
amplitude: amplitude of peak
frac_gauss: fraction of function to be Gaussian (0 -> 1)
"""
if not cls._is_flat_iter(data):
raise TypeError('data must be a flat iterable')
if not cls._is_flat_iter(peaks):
raise TypeError('peaks must be a flat iterable')
if not isinstance(data, numpy.ndarray):
data = numpy.array(data)
p = []
for i in peaks:
pamp = 0.9*abs(data[int(i)])
single_peak = [i, 10, 0.1, pamp, frac_gauss]
p.append(single_peak)
return numpy.array(p)
@classmethod
def _f_conv(cls, parameterset_list, data):
"""
        Returns the offset between the maximum of the convolution of the data with an initial set of lineshapes and the maximum of the lineshapes' auto-convolution, used to re-centre the initial peak positions.
parameterset_list -- a list of parameter lists: n*[[spectral offset (x),
gauss: 2*sigma**2,
lorentz: scale (HWHM),
amplitude: amplitude of peak,
frac_gauss: fraction of function to be Gaussian (0 -> 1)]]
where n is the number of peaks
data -- 1D spectral array
"""
if not cls._is_flat_iter(data):
raise TypeError('data must be a flat iterable')
if not cls._is_iter(parameterset_list):
raise TypeError('parameterset_list must be an iterable')
if not isinstance(data, numpy.ndarray):
data = numpy.array(data)
data[data == 0.0] = 1e-6
x = numpy.arange(len(data), dtype='f8')
peaks_init = cls._f_pks(parameterset_list, x)
data_convolution = numpy.convolve(data, peaks_init[::-1])
auto_convolution = numpy.convolve(peaks_init, peaks_init[::-1])
max_data_convolution = numpy.where(data_convolution == data_convolution.max())[0][0]
max_auto_convolution = numpy.where(auto_convolution == auto_convolution.max())[0][0]
return max_data_convolution - max_auto_convolution
@classmethod
def _f_pks_list(cls, parameterset_list, x):
"""
Return a list of peak evaluations for deconvolution. See _f_pk().
Keyword arguments:
parameterset_list -- a list of parameter lists: [spectral offset (x),
gauss: 2*sigma**2,
lorentz: scale (HWHM),
amplitude: amplitude of peak,
frac_gauss: fraction of function to be Gaussian (0 -> 1)]
x -- array of equal length to FID
"""
if not cls._is_iter_of_iters(parameterset_list):
raise TypeError('Parameter set must be an iterable of iterables')
for p in parameterset_list:
if not cls._is_iter(p):
raise TypeError('Parameter set must be an iterable')
if not all(isinstance(i, numbers.Number) for i in p):
raise TypeError('Keyword parameters must be numbers.')
if not cls._is_iter(x):
raise TypeError('x must be an iterable')
if not isinstance(x, numpy.ndarray):
x = numpy.array(x)
return numpy.array([Fid._f_pk(x, *peak) for peak in parameterset_list])
@classmethod
def _f_pks(cls, parameterset_list, x):
"""
Return the sum of a series of peak evaluations for deconvolution. See _f_pk().
Keyword arguments:
parameterset_list -- a list of parameter lists: [spectral offset (x),
gauss: 2*sigma**2,
lorentz: scale (HWHM),
amplitude: amplitude of peak,
frac_gauss: fraction of function to be Gaussian (0 -> 1)]
x -- array of equal length to FID
"""
if not cls._is_iter_of_iters(parameterset_list):
raise TypeError('Parameter set must be an iterable of iterables')
for p in parameterset_list:
if not cls._is_iter(p):
raise TypeError('Parameter set must be an iterable')
if not all(isinstance(i, numbers.Number) for i in p):
raise TypeError('Keyword parameters must be numbers.')
if not cls._is_iter(x):
raise TypeError('x must be an iterable')
if not isinstance(x, numpy.ndarray):
x = numpy.array(x)
peaks = x*0.0
for p in parameterset_list:
peak = cls._f_pk(x,
offset=p[0],
gauss_sigma=p[1],
lorentz_hwhm=p[2],
amplitude=p[3],
frac_gauss=p[4],
)
peaks += peak
return peaks
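    # Rough sketch (assumed values): evaluate the sum of two peaks over a 1024-point axis.
    #   x = numpy.arange(1024, dtype='f8')
    #   spec = Fid._f_pks([[200, 10, 0.1, 5e4, 0.5], [600, 10, 0.1, 2e4, 0.5]], x)
    # Each inner parameter list is [offset, gauss_sigma, lorentz_hwhm, amplitude, frac_gauss].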
@classmethod
def _f_res(cls, p, data):
"""
        Objective function for deconvolution. Returns residuals of the deconvolution fit.
x -- array of equal length to FID
Keyword arguments:
p -- lmfit parameters object:
offset_n -- spectral offset in x
sigma_n -- gaussian 2*sigma**2
hwhm_n -- lorentzian half width at half maximum height
amplitude_n -- amplitude of peak
frac_gauss_n -- fraction of function to be Gaussian (0 -> 1)
where n is the peak number (zero-indexed)
data -- spectrum array
"""
if not isinstance(p, lmfit.parameter.Parameters):
raise TypeError('Parameters must be of type lmfit.parameter.Parameters.')
if not cls._is_flat_iter(data):
raise TypeError('data must be a flat iterable.')
if not isinstance(data, numpy.ndarray):
data = numpy.array(data)
params = Fid._parameters_to_list(p)
x = numpy.arange(len(data), dtype='f8')
res = data-cls._f_pks(params, x)
return res
@classmethod
def _f_fitp(cls, data, peaks, frac_gauss=None, method='leastsq'):
"""Fit a section of spectral data with a combination of Gaussian/Lorentzian peaks for deconvolution.
Keyword arguments:
peaks -- selected peak positions (see peakpicker())
        frac_gauss -- fraction of fitted function to be Gaussian (1 - Gaussian, 0 - Lorentzian)
returns:
fits -- list of fitted peak parameter sets
Note: peaks are fitted by default using the Levenberg-Marquardt algorithm[1]. Other fitting algorithms are available (http://cars9.uchicago.edu/software/python/lmfit/fitting.html#choosing-different-fitting-methods).
        [1] Marquardt, Donald W. 'An algorithm for least-squares estimation of nonlinear parameters.' Journal of the Society for Industrial & Applied Mathematics 11.2 (1963): 431-441.
"""
data = numpy.real(data)
if not cls._is_flat_iter(data):
raise TypeError('data must be a flat iterable')
if not cls._is_flat_iter(peaks):
raise TypeError('peaks must be a flat iterable')
if any(peak > (len(data)-1) for peak in peaks):
raise ValueError('peaks must be within the length of data.')
if not isinstance(data, numpy.ndarray):
data = numpy.array(data)
p = cls._f_makep(data, peaks, frac_gauss=0.5)
init_ref = cls._f_conv(p, data)
if any(peaks+init_ref < 0) or any(peaks+init_ref > len(data)-1):
init_ref = 0
if frac_gauss is None:
p = cls._f_makep(data, peaks+init_ref, frac_gauss=0.5)
else:
p = cls._f_makep(data, peaks+init_ref, frac_gauss=frac_gauss)
params = lmfit.Parameters()
for parset in range(len(p)):
current_parset = dict(zip(['offset', 'sigma', 'hwhm', 'amplitude', 'frac_gauss'], p[parset]))
for k,v in current_parset.items():
par_name = '%s_%i'%(k, parset)
params.add(name=par_name,
value=v,
vary=True,
min=0.0)
if 'offset' in par_name:
params[par_name].max = len(data)-1
if 'frac_gauss' in par_name:
params[par_name].max = 1.0
if frac_gauss is not None:
params[par_name].vary = False
#if 'sigma' in par_name or 'hwhm' in par_name:
# params[par_name].max = 0.01*current_parset['amplitude']
if 'amplitude' in par_name:
params[par_name].max = 2.0*data.max()
try:
mz = lmfit.minimize(cls._f_res, params, args=([data]), method=method)
fits = Fid._parameters_to_list(mz.params)
except Exception:
fits = None
return fits
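# Illustrative sketch of calling _f_fitp directly on a small synthetic slice
# (the data and peak position here are invented for the example):
#   >>> data = Fid._f_pks([[50, 20.0, 3.0, 100.0, 0.5]], numpy.arange(200, dtype='f8'))
#   >>> fits = Fid._f_fitp(data, peaks=[50], frac_gauss=None, method='leastsq')
#   >>> # fits is a list of [offset, sigma, hwhm, amplitude, frac_gauss] sets, or None on failure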
@classmethod
def _parameters_to_list(cls, p):
n_pks = int(len(p)/5)
params = []
for i in range(n_pks):
current_params = [p['%s_%s'%(par, i)].value for par in ['offset', 'sigma', 'hwhm', 'amplitude', 'frac_gauss']]
params.append(current_params)
return params
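# The lmfit Parameters object built in _f_fitp follows the '<name>_<peak index>'
# naming scheme documented in _f_res; a sketch of the round trip with made-up values:
#   >>> p = lmfit.Parameters()
#   >>> for name, val in zip(['offset', 'sigma', 'hwhm', 'amplitude', 'frac_gauss'],
#   ...                      [50.0, 20.0, 3.0, 100.0, 0.5]):
#   ...     p.add('%s_0' % name, value=val)
#   >>> Fid._parameters_to_list(p)
#   [[50.0, 20.0, 3.0, 100.0, 0.5]]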
@classmethod
def _deconv_datum(cls, list_parameters):
if len(list_parameters) != 5:
raise ValueError('list_parameters must consist of five objects.')
if (type(list_parameters[1]) == list and len(list_parameters[1]) == 0) or \
(type(list_parameters[2]) == list and len(list_parameters[2]) == 0):
return []
datum, peaks, ranges, frac_gauss, method = list_parameters
if not cls._is_iter_of_iters(ranges):
raise TypeError('ranges must be an iterable of iterables')
if not all(len(rng) == 2 for rng in ranges):
raise ValueError('ranges must contain two values.')
if not all(rng[0] != rng[1] for rng in ranges):
raise ValueError('data_index must contain different values.')
if not isinstance(datum, numpy.ndarray):
datum = numpy.array(datum)
if datum.dtype in cls._complex_dtypes:
raise TypeError('data must not be complex.')
fit = []
for j in zip(peaks, ranges):
d_slice = datum[j[1][0]:j[1][1]]
p_slice = j[0]-j[1][0]
f = cls._f_fitp(d_slice, p_slice, frac_gauss=frac_gauss, method=method)
f = numpy.array(f).transpose()
f[0] += j[1][0]
f = f.transpose()
fit.append(f)
return fit
def deconv(self, method='leastsq', frac_gauss=0.0):
"""
Deconvolute :attr:`~nmrpy.data_objects.Fid.data` object by fitting a
series of peaks to the spectrum. These peaks are generated using the parameters
in :attr:`~nmrpy.data_objects.Fid.peaks`. :attr:`~nmrpy.data_objects.Fid.ranges`
splits :attr:`~nmrpy.data_objects.Fid.data` up into smaller portions. This
significantly speeds up deconvolution time.
:keyword frac_gauss: (0-1) determines the Gaussian fraction of the peaks. Setting this argument to None will fit this parameter as well.
:keyword method: The fitting method to use. Default is 'leastsq', the Levenberg-Marquardt algorithm, which is usually sufficient. Additional options include:
Nelder-Mead (nelder)
L-BFGS-B (l-bfgs-b)
Conjugate Gradient (cg)
Powell (powell)
Newton-CG (newton)
"""
if not len(self.data):
raise AttributeError('data does not exist.')
if self.data.dtype in self._complex_dtypes:
raise TypeError('data must not be complex.')
if self.peaks is None:
raise AttributeError('peaks must be picked.')
if self.ranges is None:
raise AttributeError('ranges must be specified.')
print('deconvoluting {}'.format(self.id))
list_parameters = [self.data, self._grouped_index_peaklist, self._index_ranges, frac_gauss, method]
self._deconvoluted_peaks = numpy.array([j for i in Fid._deconv_datum(list_parameters) for j in i])
print('deconvolution completed')
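# Typical workflow sketch for the public deconv() API (the fid object and all
# values are placeholders; peaks/ranges follow the conventions described above):
#   >>> fid.peaks = [4.7, 1.3]                       # picked peak positions (see peakpicker())
#   >>> fid.ranges = [(5.0, 4.4), (1.6, 1.0)]        # windows enclosing each peak
#   >>> fid.deconv(method='leastsq', frac_gauss=None)
#   >>> fid._deconvoluted_peaks                      # fitted parameter sets per peak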
def plot_ppm(self, **kwargs):
"""
Plot :attr:`~nmrpy.data_objects.Fid.data`.
:keyword upper_ppm: | |
import gzip
import os
import pickle
import pytest
import pysam
import numpy as np
from immunopepper.filter import junction_tuple_is_annotated
from immunopepper.io_ import convert_namedtuple_to_str
from immunopepper.io_ import gz_and_normal_open
from immunopepper.io_ import load_pickled_graph
from immunopepper.mutations import apply_germline_mutation
from immunopepper.mutations import construct_mut_seq_with_str_concat
from immunopepper.mutations import get_mutation_mode_from_parser
from immunopepper.mutations import get_sub_mutation_tuple
from immunopepper.preprocess import genes_preprocess_all
from immunopepper.preprocess import parse_mutation_from_vcf
from immunopepper.preprocess import parse_mutation_from_maf
from immunopepper.preprocess import preprocess_ann
from immunopepper.utils import check_chr_consistence
from immunopepper.utils import create_libsize
from immunopepper.utils import get_sub_mut_dna
from immunopepper.utils import get_concat_peptide
from immunopepper.translate import translate_dna_to_peptide
from immunopepper.translate import complementary_seq
from immunopepper.immunopepper import parse_arguments
from immunopepper.immunopepper import split_mode
from immunopepper.traversal import create_output_kmer
from immunopepper.namedtuples import Coord
from immunopepper.namedtuples import Mutation
from immunopepper.namedtuples import OutputBackground
from immunopepper.namedtuples import OutputKmer
from immunopepper.constant import NOT_EXIST  # NOT_EXIST is used in the assertions below; module path assumed
data_dir = os.path.join(os.path.dirname(__file__), 'test1','data')
groundtruth_dir = os.path.join(os.path.dirname(__file__), 'test1')
@pytest.fixture
def load_gene_data():
f = open(os.path.join(data_dir, 'posgraph','spladder',
'genes_graph_conf3.merge_graphs.pickle'), 'rb')
ann_path = os.path.join(data_dir, 'test1pos.gtf')
ref_path = os.path.join(data_dir, 'test1pos.fa')
(graph_data, graph_meta) = load_pickled_graph(f) # cPickle.load(f)
genetable,chr_set = preprocess_ann(ann_path)
interesting_chr = list(map(str, range(1, 23))) + ["X", "Y", "MT"]
gene = graph_data[0]
chrm = gene.chr.strip()
ref_seq = ref_path # seq_dict[chrm]
return graph_data, ref_path, genetable.gene_to_cds_begin
@pytest.fixture
def load_mutation_data():
vcf_path = os.path.join(data_dir, 'test1pos.vcf')
maf_path = os.path.join(data_dir, 'test1pos.maf')
mutation_dic_vcf = parse_mutation_from_vcf(vcf_path=vcf_path,h5_sample_list=['test1pos'])
mutation_dic_maf = parse_mutation_from_maf(maf_path=maf_path)
return mutation_dic_vcf, mutation_dic_maf
def test_preprocess(load_gene_data):
graph_data, _, gene_cds_begin_dict = load_gene_data
gene_info = genes_preprocess_all(graph_data, gene_cds_begin_dict)
assert gene_info[0].nvertices == 8
def test_germline_mutation(load_gene_data, load_mutation_data):
graph_data, ref_path, gene_cds_begin_dict = load_gene_data
mutation_dic_vcf, mutation_dic_maf = load_mutation_data
gene = graph_data[0]
mutation_sub_dic_vcf = mutation_dic_vcf['test1pos', gene.chr]
ref_mut_seq = apply_germline_mutation(ref_sequence_file=ref_path,
chrm=gene.chr,
pos_start=gene.start,
pos_end=gene.stop,
mutation_sub_dict=mutation_sub_dic_vcf)
assert 'ref' in ref_mut_seq.keys()
def test_get_sub_mut_dna(load_gene_data, load_mutation_data):
graph_data, ref_path, gene_cds_begin_dict = load_gene_data
mutation_dic_vcf, mutation_dic_maf = load_mutation_data
gene = graph_data[0]
# modify the maf mutation dict for this test (add a silent SNP at position 41)
var_dict = {'ref_base': 'G', 'mut_base': 'A', 'strand': '+',
'Variant_Classification': 'Silent', 'Variant_Type': 'SNP'}
mutation_sub_dic_maf = mutation_dic_maf['test1pos', gene.chr]
mutation_sub_dic_maf[41] = var_dict
test_list = [[11, 29, 38, 50],
[11, 29, 38, 50],
[60, 75, 87, 102]]
groundtruth = ['GATGACGCACGCATGGTGGTGGGTTGCGGA',
'GATGACGCACGCATGGTGGTGAGTTGCGGA',
'GTTCAGGTACGTATATCGACGTTCTGGTGG',
]
variant_comb = [(38,), (38, 41), '.']
strand = ['+', '+', '+']
for i, vlist in enumerate(test_list):
with pysam.FastaFile(ref_path) as fh:
ref_seq = fh.fetch(gene.chr, vlist[0], vlist[3])
coord = Coord(vlist[0], vlist[1], vlist[2], vlist[3])
sub_dna = get_sub_mut_dna(ref_seq, coord, variant_comb[i],
mutation_sub_dic_maf, strand[i], vlist[0])
assert sub_dna == groundtruth[i]
def test_reading_gtf_and_gff3_file():
gff3_path = os.path.join(data_dir,'small.gencode.v29.gff3')
gtf_path = os.path.join(data_dir, 'small.gencode.v29.gtf')
gene_table_gtf,_ = preprocess_ann(gtf_path)
gene_table_gff,_ = preprocess_ann(gff3_path)
assert gene_table_gff.gene_to_ts == gene_table_gtf.gene_to_ts
assert gene_table_gtf.ts_to_cds.keys() == gene_table_gff.ts_to_cds.keys()
assert gene_table_gtf.ts_to_cds['ENST00000335137.4'] == [(69090, 70005, 0)]
assert gene_table_gff.ts_to_cds['ENST00000335137.4'] == [(69090, 70008, 0)] # include stop codon in gff3
def test_reading_gtf_and_gff_file():
gff_path = os.path.join(data_dir,'small.gencode.v19.gff')
gtf_path = os.path.join(data_dir, 'small.gencode.v19.gtf')
gene_table_gtf,_ = preprocess_ann(gtf_path)
gene_table_gff,_ = preprocess_ann(gff_path)
assert gene_table_gff.gene_to_ts == gene_table_gtf.gene_to_ts
assert gene_table_gtf.ts_to_cds.keys() == gene_table_gff.ts_to_cds.keys()
assert gene_table_gff.ts_to_cds['ENST00000335137.3'] == [(69090, 70006, 0)]
assert gene_table_gtf.ts_to_cds['ENST00000335137.3'] == [(69090, 70005, 0)]
def test_reading_vcf_h5():
vcf_dict_default_heter_code0 = parse_mutation_from_vcf(os.path.join(data_dir,'test1vcf.h5'),h5_sample_list=['test1pos','test1neg'])
vcf_dict_heter_code2 = parse_mutation_from_vcf(os.path.join(data_dir,'test1vcf.h5'),h5_sample_list=['test1pos','test1neg'],heter_code=2)
assert len(vcf_dict_default_heter_code0) == 2
assert vcf_dict_default_heter_code0['test1neg', 'X'][135] == {'mut_base': 'G', 'ref_base': 'C'}
assert vcf_dict_default_heter_code0['test1pos', 'X'][14] == {'mut_base': 'C', 'ref_base':'G'}
assert vcf_dict_heter_code2['test1pos', 'X'][135] == {'mut_base': 'G', 'ref_base': 'C'}
assert vcf_dict_heter_code2['test1neg', 'X'][14] == {'mut_base': 'C', 'ref_base':'G'}
assert vcf_dict_heter_code2['test1neg', 'X'][135] == {'mut_base': 'G', 'ref_base':'C'}
def test_construct_mut_seq_with_str_concat():
ref_seq = 'GTAATGTGTAAGATGACGCACGCATGGTGGTATTGGAGATGGGTTGCGGAGTAAGTTCGAGTTC'
gt_mut_seq2 = 'GTAATGTGTAAGATGACGCACGCATA'+'C'+'TGGTATTGGAGATGGGTTGCGGAGTAAGTTCGAGTTC'
gt_mut_seq3 = 'GTAATGTGTAAGATGACGCACGCATA'+'G'+'TGGTATTGGAGATGGGTTGCGGAGTAAGTTCGAGTTC'
mut_dict = {}
mut_dict[10] = {'mut_base':'*','ref_base':'A'}
mut_dict[25] = {'mut_base':'A','ref_base':'G'}
mut_dict[26] = {'mut_base':'C','ref_base':'G'}
mut_seq1 = construct_mut_seq_with_str_concat(ref_seq[45:], 45, 46, mut_dict) # test unclear mut_base
assert mut_seq1 == ref_seq[45:]
mut_seq2 = construct_mut_seq_with_str_concat(ref_seq[25:], 25, 27, mut_dict) # [25,27) include 26
assert mut_seq2 == gt_mut_seq2[25:]
mut_seq3 = construct_mut_seq_with_str_concat(ref_seq[25:], 25, 26, mut_dict) # [25,26) not include 26
assert mut_seq3 == gt_mut_seq3[25:]
def test_get_mutation_mode_from_parser():
basic_args = ['build',
'--samples','this_sample',
'--splice-path','this_splicegraph',
'--output-dir','this_output_dir',
'--ann-path','this_ann_path',
'--ref-path','this_ref_path']
my_args1 = basic_args+[
'--germline', os.path.join(data_dir,'test1pos.vcf'),
'--somatic', os.path.join(data_dir,'test1pos.maf'),
'--mutation-mode', 'somantic'] # bad mutation mode
args = parse_arguments(my_args1)
try:
get_mutation_mode_from_parser(args)
except SystemExit:
assert 1
my_args2 = basic_args+['--germline', os.path.join(data_dir,'test1pos.vcf'),
'--mutation-mode', 'somatic'] # mismatch mutation mode and input files
args = parse_arguments(my_args2)
try:
get_mutation_mode_from_parser(args)
except SystemExit:
assert 1
def test_get_sub_mutation_tuple():
germline_dict = {('test1pos','1'):{135:{'mut_base': 'G', 'ref_base': 'C'}},('test1neg','1'):{136:{'mut_base': 'G', 'ref_base': 'C'}}}
somatic_dict = {('test1pos','1'):{28:{'mut_base': 'G', 'ref_base': 'C'}},('test2neg','1'):{29:{'mut_base': 'G', 'ref_base': 'C'}}}
sample = 'test1pos'
chrm = '1'
mutation = Mutation(mode='somatic_and_germline',germline_mutation_dict=germline_dict,somatic_mutation_dict=somatic_dict)
sub_mutation = get_sub_mutation_tuple(mutation,sample,chrm)
assert sub_mutation.somatic_mutation_dict == mutation.somatic_mutation_dict[('test1pos','1')]
assert sub_mutation.germline_mutation_dict == mutation.germline_mutation_dict[('test1pos','1')]
assert sub_mutation.mode == mutation.mode
# (test1neg,'1') not in the keys of maf_dict
sample = 'test1neg'
try:
sub_mutation = get_sub_mutation_tuple(mutation, sample, chrm)
except SystemExit:
assert 1
# (test1pos, 2) exists in neither vcf nor maf
sample = 'test1pos'
chrm = '2'
try:
sub_mutation = get_sub_mutation_tuple(mutation, sample, chrm)
except SystemExit:
assert 1
def test_create_output_kmer():
k = 3
peptide = OutputBackground('1','MTHAW')
expr_lists = [(8,1000),(1,220),(6,0)] # test 0 expression
c = create_output_kmer(peptide, k, expr_lists)
true_output = [OutputKmer('MTH','1',913.33,False,NOT_EXIST), OutputKmer('THA','1',580.0,False,NOT_EXIST), OutputKmer('HAW','1',246.67,False,NOT_EXIST)]
assert c == true_output
expr_lists = [(8,1000),(1,220),(0,0)] # test 0 expression
c = create_output_kmer(peptide, k, expr_lists)
true_output = [OutputKmer('MTH','1',913.33,False,NOT_EXIST), OutputKmer('THA','1',870.0,False,NOT_EXIST), OutputKmer('HAW','1',740.0,False,NOT_EXIST)]
assert c == true_output
def test_get_concat_peptide():
front_coord = Coord(10,19,25,33)
back_coord = Coord(27,36,44,53)
front_peptide = ''
back_peptide = 'MGF'
strand = '+'
concat_pep = get_concat_peptide(front_coord,back_coord,front_peptide,back_peptide,strand)
assert concat_pep == ''
front_peptide = 'MGF'
back_peptide = ''
strand = '+'
concat_pep = get_concat_peptide(front_coord,back_coord,front_peptide,back_peptide,strand)
assert concat_pep == ''
front_peptide = 'EDM'
back_peptide = 'DMF'
strand = '+'
concat_pep = get_concat_peptide(front_coord,back_coord,front_peptide,back_peptide,strand)
assert concat_pep == 'EDMF'
# neg case
front_coord = Coord(35,43,20,29)
back_coord = Coord(18,26,17,13)
strand = '-'
front_peptide = 'EDM'
back_peptide = 'DMF'
concat_pep = get_concat_peptide(front_coord,back_coord,front_peptide,back_peptide,strand)
assert concat_pep == 'EDMF'
def test_convert_namedtuple_to_str():
other_pep_field_list = ['id', 'new_line', 'peptide']
back_pep1 = OutputBackground(1,'EDMHG')
back_pep2 = OutputBackground(2,'')
back_pep3 = OutputBackground(3,'KKQ')
back_pep_list = [back_pep1,back_pep2,back_pep3]
result = [convert_namedtuple_to_str(back_pep,other_pep_field_list)+'\n' for back_pep in back_pep_list]
expected_result = ['1\nEDMHG\n', '2\n\n', '3\nKKQ\n']
assert result == expected_result
other_pep_field_list = ['kmer','id','expr','is_cross_junction','junction_count']
kmer_pep1 = OutputKmer('','GENE0_1_2',NOT_EXIST,False,NOT_EXIST)
kmer_pep2 = OutputKmer('AQEB','GENE0_1_3',20,True,25)
kmer_pep_list = [kmer_pep1,kmer_pep2]
result = [convert_namedtuple_to_str(kmer_pep,other_pep_field_list)+'\n' for kmer_pep in kmer_pep_list]
expected_result = ['\tGENE0_1_2\t.\tFalse\t.\n', 'AQEB\tGENE0_1_3\t20\tTrue\t25\n']
assert result == expected_result
def test_get_junction_ann_flag():
junction_flag = np.zeros((4,4))
junction_flag[1,2] = 1
junction_flag[2,3] = 1
vertex_id_tuple = (1, 2, 3)
assert junction_tuple_is_annotated(junction_flag, vertex_id_tuple) == 1
vertex_id_tuple = (0, 2, 3)
assert junction_tuple_is_annotated(junction_flag, vertex_id_tuple) == 1
vertex_id_tuple = (0, 1, 3)
assert junction_tuple_is_annotated(junction_flag, vertex_id_tuple) == 0
vertex_id_tuple = (1, 2)
assert junction_tuple_is_annotated(junction_flag, vertex_id_tuple) == 1
def test_check_chr_consistence(load_gene_data, load_mutation_data):
graph_data, _, gene_cds_begin_dict = load_gene_data
mutation_dic_vcf, mutation_dic_maf = load_mutation_data
mutation = Mutation(None,mutation_dic_maf,mutation_dic_vcf) # vcf_dict[('test1pos','X')]
ann_path = os.path.join(data_dir, 'test1pos.gtf')
genetable,chr_set = preprocess_ann(ann_path)
check_chr_consistence(chr_set,mutation,graph_data)
# conflict between chr_set and vcf_dict
try:
chr_set = set(['chrX'])
check_chr_consistence(chr_set, mutation, graph_data)
except SystemExit:
assert 1
# conflict between chr_set and gene.chr
try:
graph_data[0].chr = 'chrX'
check_chr_consistence(chr_set, mutation, graph_data)
except SystemExit:
assert 1
def test_create_libsize():
fp = 'temp.txt'
expr_distr_dict = {'test1pos':['.'],'test1neg':[2,10,11]}
libsize_count_dict = create_libsize(expr_distr_dict,fp,debug=True)
assert libsize_count_dict == {'test1neg':(10.5,23)}
def check_kmer_pos_valid(new_junction_file, genome_file, mutation_mode='somatic', sample=None,
germline_file_path=None,somatic_file_path=None,basic_args=None):
"""
Check if the exact dna position can output the same kmer
Parameters
----------
new_junction_file: str. kmer junction file path output by the filter step
genome_file: str. genome file path.
mutation_mode: str. choose from four modes: ref, germline, somatic, somatic_and_germline
sample: str. sample name
germline_file_path: str. germline file path
somatic_file_path: str. somatic file path
"""
#read the variant file
if basic_args is None:
basic_args = ['build',
'--samples','this_sample',
'--splice-path','this_splicegraph',
'--output-dir','this_output_dir',
'--ann-path','this_ann_path',
'--ref-path','this_ref_path']
my_args1 = basic_args+[
'--somatic', somatic_file_path,
'--germline',germline_file_path,
'--mutation-mode', mutation_mode]
args = parse_arguments(my_args1)
mutation = get_mutation_mode_from_parser(args)
# read genome file
seq_dict = {}
with pysam.FastaFile(genome_file) as fh:
refs = fh.references
lens = fh.lengths
for i,ref in enumerate(refs):
if mutation_mode in ['germline', 'somatic_and_germline']:
if (sample, ref) in mutation.germline_mutation_dict:
seq_dict[ref] = apply_germline_mutation(ref_sequence_file=genome_file,
chrm=ref,
pos_start=0,
pos_end=lens[i],
mutation_sub_dict=mutation.germline_mutation_dict[(sample, ref)])['background']
else:
seq_dict[ref] = fh.fetch(ref)
f = gz_and_normal_open(new_junction_file,'r')
headline = next(f)
for line_id, line in enumerate(f):
if line_id % 10000 == 0:
print("{} kmers validated".format(line_id))
items = line.strip().split('\t')
kmer = items[0]
exact_kmer_pos = items[5]
gene_chr, gene_strand, somatic_comb,pos_str = exact_kmer_pos.split('_')
pos_list = [int(pos) for pos in pos_str.split(';')]
sub_mutation = get_sub_mutation_tuple(mutation, sample, gene_chr)
somatic_comb_list = somatic_comb.split(';')
if somatic_comb_list[0] == NOT_EXIST:
somatic_comb_list = []
i = 0
seq_list = []
if gene_strand == '+':
while i < len(pos_list):
orig_str = seq_dict[gene_chr][pos_list[i]:pos_list[i+1]]
for somatic_mut_pos in somatic_comb_list:
somatic_mut_pos = int(somatic_mut_pos)
if somatic_mut_pos in range(pos_list[i],pos_list[i+1]):
offset = somatic_mut_pos-pos_list[i]
mut_base = sub_mutation.somatic_mutation_dict[somatic_mut_pos]['mut_base']
ref_base = sub_mutation.somatic_mutation_dict[somatic_mut_pos]['ref_base']
# assert ref_base == orig_str[offset]
orig_str = orig_str[:offset] + mut_base+orig_str[offset+1:]
#somatic_comb_list.remove(str(somatic_mut_pos))
seq_list.append(orig_str)
i += 2
seq = ''.join(seq_list)
else:
while i < len(pos_list)-1:
orig_str = seq_dict[gene_chr][pos_list[i+1]:pos_list[i]]
for somatic_mut_pos in somatic_comb_list:
somatic_mut_pos = int(somatic_mut_pos)
if somatic_mut_pos in range(pos_list[i+1], pos_list[i]):
offset = somatic_mut_pos - pos_list[i+1]
mut_base = sub_mutation.somatic_mutation_dict[somatic_mut_pos]['mut_base']
ref_base = sub_mutation.somatic_mutation_dict[somatic_mut_pos]['ref_base']
# assert ref_base == orig_str[offset]
orig_str = orig_str[:offset] + mut_base + orig_str[offset+1:]
#somatic_comb_list.remove(somatic_mut_pos)
seq_list.append(orig_str[::-1])
i += 2
seq = ''.join(seq_list)
seq = complementary_seq(seq)
aa,_ = translate_dna_to_peptide(seq)
assert aa == kmer
# case='neg'
# mutation_mode='somatic_and_germline'
# sample='test1{}'.format(case)
# new_junction_file = 'new_junction_kmer.txt'
# genome_file = 'tests/test1/data/test1{}.fa'.format(case)
# germline_file_path='tests/test1/data/test1{}.vcf'.format(case)
# somatic_file_path='tests/test1/data/test1{}.maf'.format(case)
# check_kmer_pos_valid(new_junction_file,genome_file,mutation_mode,sample,germline_file_path,somatic_file_path)
@pytest.mark.parametrize("test_id,case,mutation_mode", [
['1', 'pos', 'ref'],
['1', 'pos', 'germline'],
['1', 'pos', 'somatic'],
['1', 'pos', 'somatic_and_germline'],
['1', 'neg', 'ref'],
['1', 'neg', 'germline'],
['1', 'neg', 'somatic'],
['1', | |
mapping')
plt.show()
return image_tonemapped
def srgb_to_linear(image_srgb, verbose=False):
'''
---------------------------------------------------------------------------
Transform an image from sRGB color space to linear
---------------------------------------------------------------------------
The function undoes the main non-linearities associated with the sRGB color
space, in order to approximate a linear color response. Note that the
linear image output will look darker, because the gamma correction will be
undone. The transformation formulas can be found in the EasyRGB website:
https://www.easyrgb.com/en/math.php
Note that the formulas may look slightly different. This is because they
have been altered in order to implement them in a vectorized way, avoiding
for loops. As such, an image is partitioned in 2 parts image_upper and
image_lower, which implement separate parts of the piece-wise color
transformation formula.
INPUTS
------
image_srgb: numpy array of WxHx3 of uint8 [0,255]
Input color image with values in the interval [0,255]. Assuming that
it is encoded on the sRGB color space. The code will still work if the
input image is grayscale or within [0,1] range.
verbose: boolean
Display outputs.
OUTPUT
------
image_linear: numpy array of WxHx3 of float [0,1]
Output color linear image with values in the interval [0,1]. Gamma has
been removed, so it looks darker.
'''
# dealing with different input dimensions
dimensions = len(image_srgb.shape)
if dimensions == 1:
image_srgb = np.expand_dims(image_srgb, axis=2) # make a 3rd dimension
image_srgb = img_as_float(image_srgb) # [0,1]
# lower part of the piecewise formula
image_lower = image_srgb.copy()
image_lower[image_lower > 0.04045] = 0
image_lower = image_lower / 12.92
# upper part of the piecewise formula
image_upper = image_srgb.copy()
image_upper = image_upper + 0.055
image_upper[image_upper <= (0.04045+0.055)] = 0
image_upper = image_upper / 1.055
image_upper = image_upper ** 2.4
image_linear = image_lower + image_upper # combine into the final result
if verbose is True:
plt.figure()
plt.subplot(1,2,1)
plt.imshow(image_srgb, vmin=0, vmax=1)
plt.title('Image sRGB')
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(image_linear, vmin=0, vmax=1)
plt.title('Image linear')
plt.axis('off')
plt.tight_layout(True)
plt.suptitle('sRGB -> linear space')
plt.show()
return image_linear
def linear_to_srgb(image_linear, verbose=False):
'''
---------------------------------------------------------------------------
Transform an image from linear to sRGB color space
---------------------------------------------------------------------------
The function re-applies the main non-linearities associated with the sRGB
color space. The transformation formula can be found in EasyRGB website:
https://www.easyrgb.com/en/math.php
Note that the formulas may look slightly different. This is because they
have been altered in order to implement them in a vectorized way, avoiding
for loops. As such, an image is partitioned in 2 parts image_upper and
image_lower, which implement separate parts of the piece-wise color
transformation formula.
INPUTS
------
image_linear: numpy array of WxHx3 of float [0,1]
Input color image with values in the interval [0,1].
verbose: boolean
Display outputs.
OUTPUT
------
image_srgb: numpy array of WxHx3 of uint8 [0,255]
Output color sRGB image with values in the interval [0,255].
'''
# dealing with different input dimensions
dimensions = len(image_linear.shape)
if dimensions == 1:
image_linear = np.expand_dims(image_linear, axis=2) # 3rd dimension
image_linear = img_as_float(image_linear) # [0,1]
# lower part of the piecewise formula
image_lower = image_linear.copy()
image_lower[image_lower > 0.0031308] = 0
image_lower = image_lower * 12.92
# upper part of the piecewise formula
image_upper = image_linear.copy()
image_upper[image_upper <= 0.0031308] = 0
image_upper = image_upper ** (1/2.4)
image_upper = image_upper * 1.055
image_upper = image_upper - 0.055
image_srgb = image_lower + image_upper
image_srgb = np.clip(a=image_srgb, a_min=0, a_max=1, out=image_srgb)
if verbose is True:
plt.figure()
plt.subplot(1,2,1)
plt.imshow(image_linear, vmin=0, vmax=1)
plt.title('Image linear')
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(image_srgb, vmin=0, vmax=1)
plt.title('Image sRGB')
plt.axis('off')
plt.tight_layout(True)
plt.suptitle('Linear space -> sRGB')
plt.show()
return (image_srgb * 255).astype(np.uint8)
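# Quick round-trip sanity check for the two transforms above (a sketch; exact
# equality is not expected because linear_to_srgb re-quantises to uint8):
#   >>> rgb = (np.random.rand(4, 4, 3) * 255).astype(np.uint8)
#   >>> back = linear_to_srgb(srgb_to_linear(rgb))
#   >>> np.abs(back.astype(int) - rgb.astype(int)).max()   # expected to stay within ~1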
def transfer_graytone_to_color(image_color, image_graytone, verbose=False):
'''
---------------------------------------------------------------------------
Transfer grayscale tones to a color image
---------------------------------------------------------------------------
Transfers the tones of a guide grayscale image to the color version of the
same image, by using linear color ratios. It first brings the image from
the sRGB color space back to the linear color space. It estimates color
ratios of the grayscale color image with the tone-mapped grayscale guide
image. It then applies the color ratios on the 3 color channels. Finally,
it brings back the image to the sRGB color space (gamma corrected). If the
input image is in another color space (e.g. Adobe RGB), a different
transformation could be used. However, results will not be that much
different.
Related publication:
<NAME>, <NAME>, <NAME>, and <NAME>, "Color
preservation for tone reproduction and image enhancement", Proc. SPIE 9015,
Color Imaging XIX, 2014
INPUTS
------
image_color: numpy array of WxHx3 of uint8 [0,255]
Input color image.
image_graytone: numpy array of WxH of float [0,1]
Grayscale version of the image_color which has been tonemapped and it
will be used as a guide to transfer the same tonemapping to the color
image.
verbose: boolean
Display outputs.
OUTPUT
------
image_colortone: numpy array of WxHx3 of uint8 [0,255]
Output color image with transfered tonemapping.
'''
EPSILON = 1 / 256
# bring both color and graytone to linear space
image_color_linear = srgb_to_linear(image_color.copy(), verbose=False)
image_graytone_linear = srgb_to_linear(image_graytone.copy(),verbose=False)
image_gray_linear = rgb2gray(image_color_linear.copy())
image_gray_linear[image_gray_linear==0] = EPSILON # for the division later
# tone ratio of linear images: improved/original
tone_ratio = image_graytone_linear / image_gray_linear
# tone_ratio[np.isinf(tone_ratio)] = 0
# tone_ratio[np.isnan(tone_ratio)] = 0
# apply the tone ratios to the color image
image_colortone_linear = image_color_linear * np.dstack([tone_ratio] * 3)
# make sure it's within limits
image_colortone_linear = np.clip(
a=image_colortone_linear,
a_min=0,
a_max=1,
out=image_colortone_linear
)
# bring back to gamma-corrected sRGB space for visualization
image_colortone = linear_to_srgb(image_colortone_linear, verbose=False)
# display results
if verbose is True:
plt.figure()
plt.subplot(2,4,1)
plt.imshow(image_color, vmin=0, vmax=255)
plt.title('Color')
plt.axis('off')
plt.subplot(2,4,5)
plt.imshow(image_color_linear, vmin=0, vmax=1)
plt.title('Color linear')
plt.axis('off')
plt.subplot(2,4,2)
plt.imshow(image_graytone, cmap='gray', vmin=0, vmax=1)
plt.title('Graytone')
plt.axis('off')
plt.subplot(2,4,6)
plt.imshow(image_graytone_linear, cmap='gray', vmin=0, vmax=1)
plt.title('Graytone linear')
plt.axis('off')
plt.subplot(2,4,7)
plt.imshow(tone_ratio, cmap='gray')
plt.title('Tone ratios')
plt.axis('off')
plt.subplot(2,4,4)
plt.imshow(image_colortone, vmin=0, vmax=255)
plt.title('Colortone')
plt.axis('off')
plt.subplot(2,4,8)
plt.imshow(image_colortone_linear, vmin=0, vmax=1)
plt.title('Colortone linear')
plt.axis('off')
plt.tight_layout(True)
plt.suptitle('Transfering gray tones to color')
plt.show()
return image_colortone
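# Usage sketch for the tone-transfer pipeline described above (the image path and
# the tone-mapping step are placeholders; any [0,1] grayscale guide of matching size works):
#   >>> image_color = plt.imread('photo.jpg')                     # uint8 sRGB image
#   >>> image_graytone = my_tonemapping(rgb2gray(image_color))    # hypothetical tone-mapped guide in [0,1]
#   >>> result = transfer_graytone_to_color(image_color, image_graytone, verbose=True)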
def change_color_saturation(
image_color,
image_ph_mask=None,
sat_degree=1.5,
verbose=False):
'''
---------------------------------------------------------------------------
Adjust color saturation of an image
---------------------------------------------------------------------------
Increase or decrease the saturation (vibrance) of colors in an image. This
implements a simpler approach rather than using the HSV color space to
adjust S. In my experiments HSV-based saturation adjustment was not as good
and it exhibited some kind of 'color noise'. This approach is aesthetically
better. The use of photometric_mask is optional, in case you would like to
treat dark areas (where saturation is usually lower) differently.
INPUTS
------
image_color: numpy array of WxHx3 of float [0,1]
Input color image.
image_ph_mask: numpy array of WxH of float [0,1] or None
Grayscale image whose values represent the neighborhood of the pixels
of the input image. If None, saturation adjustment is applied globally
to all pixels. If not None, then dark regions are treated differently
and get an additional boost in saturation.
sat_degree: float [0, inf]
How to change the color saturation. 0: no color (grayscale),
<1: reduced color saturation, 1: color saturation unchanged,
>1: increased color saturation.
verbose: boolean
Display outputs.
OUTPUT
------
image_new_sat: numpy array of WxHx3 of float [0,1]
Output image with adjusted saturation.
'''
LOCAL_BOOST = 0.2
THRESHOLD_DARK_TONES = 100 / 255
#TODO: return the same image type
image_color = img_as_float(image_color) # [0,1]
# define gray scale
image_gray = (image_color[:,:,0] +
image_color[:,:,1] +
image_color[:,:,2]) / 3
image_gray = np.dstack([image_gray] * 3) # grayscale with 3 channels
image_delta = image_color - image_gray # deviations from gray
# defining local color amplification degree
if image_ph_mask is not None:
detail_amplification_local = image_ph_mask / THRESHOLD_DARK_TONES
detail_amplification_local[detail_amplification_local>1] | |
= None
) -> None:
self.email = email
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="CustomerEmailChanged",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"CustomerEmailChangedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, email=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.email,
)
)
class CustomerEmailChangedMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.CustomerEmailChangedMessagePayloadSchema`."
#: :class:`str`
email: str
def __init__(self, *, type: str = None, email: str = None) -> None:
self.email = email
super().__init__(type="CustomerEmailChanged")
def __repr__(self) -> str:
return "CustomerEmailChangedMessagePayload(type=%r, email=%r)" % (
self.type,
self.email,
)
class CustomerEmailVerifiedMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.CustomerEmailVerifiedMessageSchema`."
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None
) -> None:
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="CustomerEmailVerified",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"CustomerEmailVerifiedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
)
)
class CustomerEmailVerifiedMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.CustomerEmailVerifiedMessagePayloadSchema`."
def __init__(self, *, type: str = None) -> None:
super().__init__(type="CustomerEmailVerified")
def __repr__(self) -> str:
return "CustomerEmailVerifiedMessagePayload(type=%r)" % (self.type,)
class CustomerGroupSetMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.CustomerGroupSetMessageSchema`."
#: :class:`commercetools.types.CustomerGroupReference` `(Named` ``customerGroup`` `in Commercetools)`
customer_group: "CustomerGroupReference"
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
customer_group: "CustomerGroupReference" = None
) -> None:
self.customer_group = customer_group
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="CustomerGroupSet",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"CustomerGroupSetMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, customer_group=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.customer_group,
)
)
class CustomerGroupSetMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.CustomerGroupSetMessagePayloadSchema`."
#: :class:`commercetools.types.CustomerGroupReference` `(Named` ``customerGroup`` `in Commercetools)`
customer_group: "CustomerGroupReference"
def __init__(
self, *, type: str = None, customer_group: "CustomerGroupReference" = None
) -> None:
self.customer_group = customer_group
super().__init__(type="CustomerGroupSet")
def __repr__(self) -> str:
return "CustomerGroupSetMessagePayload(type=%r, customer_group=%r)" % (
self.type,
self.customer_group,
)
class DeliveryAddedMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryAddedMessageSchema`."
#: :class:`commercetools.types.Delivery`
delivery: "Delivery"
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
delivery: "Delivery" = None
) -> None:
self.delivery = delivery
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="DeliveryAdded",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"DeliveryAddedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, delivery=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.delivery,
)
)
class DeliveryAddedMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryAddedMessagePayloadSchema`."
#: :class:`commercetools.types.Delivery`
delivery: "Delivery"
def __init__(self, *, type: str = None, delivery: "Delivery" = None) -> None:
self.delivery = delivery
super().__init__(type="DeliveryAdded")
def __repr__(self) -> str:
return "DeliveryAddedMessagePayload(type=%r, delivery=%r)" % (
self.type,
self.delivery,
)
class DeliveryAddressSetMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryAddressSetMessageSchema`."
#: :class:`str` `(Named` ``deliveryId`` `in Commercetools)`
delivery_id: str
#: Optional :class:`commercetools.types.Address`
address: typing.Optional["Address"]
#: Optional :class:`commercetools.types.Address` `(Named` ``oldAddress`` `in Commercetools)`
old_address: typing.Optional["Address"]
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
delivery_id: str = None,
address: typing.Optional["Address"] = None,
old_address: typing.Optional["Address"] = None
) -> None:
self.delivery_id = delivery_id
self.address = address
self.old_address = old_address
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="DeliveryAddressSet",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"DeliveryAddressSetMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, delivery_id=%r, address=%r, old_address=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.delivery_id,
self.address,
self.old_address,
)
)
class DeliveryAddressSetMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryAddressSetMessagePayloadSchema`."
#: :class:`str` `(Named` ``deliveryId`` `in Commercetools)`
delivery_id: str
#: Optional :class:`commercetools.types.Address`
address: typing.Optional["Address"]
#: Optional :class:`commercetools.types.Address` `(Named` ``oldAddress`` `in Commercetools)`
old_address: typing.Optional["Address"]
def __init__(
self,
*,
type: str = None,
delivery_id: str = None,
address: typing.Optional["Address"] = None,
old_address: typing.Optional["Address"] = None
) -> None:
self.delivery_id = delivery_id
self.address = address
self.old_address = old_address
super().__init__(type="DeliveryAddressSet")
def __repr__(self) -> str:
return (
"DeliveryAddressSetMessagePayload(type=%r, delivery_id=%r, address=%r, old_address=%r)"
% (self.type, self.delivery_id, self.address, self.old_address)
)
class DeliveryItemsUpdatedMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryItemsUpdatedMessageSchema`."
#: :class:`str` `(Named` ``deliveryId`` `in Commercetools)`
delivery_id: str
#: List of :class:`commercetools.types.DeliveryItem`
items: typing.List["DeliveryItem"]
#: List of :class:`commercetools.types.DeliveryItem` `(Named` ``oldItems`` `in Commercetools)`
old_items: typing.List["DeliveryItem"]
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
delivery_id: str = None,
items: typing.List["DeliveryItem"] = None,
old_items: typing.List["DeliveryItem"] = None
) -> None:
self.delivery_id = delivery_id
self.items = items
self.old_items = old_items
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="DeliveryItemsUpdated",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"DeliveryItemsUpdatedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, delivery_id=%r, items=%r, old_items=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.delivery_id,
self.items,
self.old_items,
)
)
class DeliveryItemsUpdatedMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryItemsUpdatedMessagePayloadSchema`."
#: :class:`str` `(Named` ``deliveryId`` `in Commercetools)`
delivery_id: str
#: List of :class:`commercetools.types.DeliveryItem`
items: typing.List["DeliveryItem"]
#: List of :class:`commercetools.types.DeliveryItem` `(Named` ``oldItems`` `in Commercetools)`
old_items: typing.List["DeliveryItem"]
def __init__(
self,
*,
type: str = None,
delivery_id: str = None,
items: typing.List["DeliveryItem"] = None,
old_items: typing.List["DeliveryItem"] = None
) -> None:
self.delivery_id = delivery_id
self.items = items
self.old_items = old_items
super().__init__(type="DeliveryItemsUpdated")
def __repr__(self) -> str:
return (
"DeliveryItemsUpdatedMessagePayload(type=%r, delivery_id=%r, items=%r, old_items=%r)"
% (self.type, self.delivery_id, self.items, self.old_items)
)
class DeliveryRemovedMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryRemovedMessageSchema`."
#: :class:`commercetools.types.Delivery`
delivery: "Delivery"
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
delivery: "Delivery" = None
) -> None:
self.delivery = delivery
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="DeliveryRemoved",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"DeliveryRemovedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, delivery=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
self.type,
self.resource_user_provided_identifiers,
self.delivery,
)
)
class DeliveryRemovedMessagePayload(MessagePayload):
"Corresponding marshmallow schema is :class:`commercetools.schemas.DeliveryRemovedMessagePayloadSchema`."
#: :class:`commercetools.types.Delivery`
delivery: "Delivery"
def __init__(self, *, type: str = None, delivery: "Delivery" = None) -> None:
self.delivery = delivery
super().__init__(type="DeliveryRemoved")
def __repr__(self) -> str:
return "DeliveryRemovedMessagePayload(type=%r, delivery=%r)" % (
self.type,
self.delivery,
)
class InventoryEntryCreatedMessage(Message):
"Corresponding marshmallow schema is :class:`commercetools.schemas.InventoryEntryCreatedMessageSchema`."
#: :class:`commercetools.types.InventoryEntry` `(Named` ``inventoryEntry`` `in Commercetools)`
inventory_entry: "InventoryEntry"
def __init__(
self,
*,
id: str = None,
version: int = None,
created_at: datetime.datetime = None,
last_modified_at: datetime.datetime = None,
last_modified_by: typing.Optional["LastModifiedBy"] = None,
created_by: typing.Optional["CreatedBy"] = None,
sequence_number: int = None,
resource: "Reference" = None,
resource_version: int = None,
type: str = None,
resource_user_provided_identifiers: typing.Optional[
"UserProvidedIdentifiers"
] = None,
inventory_entry: "InventoryEntry" = None
) -> None:
self.inventory_entry = inventory_entry
super().__init__(
id=id,
version=version,
created_at=created_at,
last_modified_at=last_modified_at,
last_modified_by=last_modified_by,
created_by=created_by,
sequence_number=sequence_number,
resource=resource,
resource_version=resource_version,
type="InventoryEntryCreated",
resource_user_provided_identifiers=resource_user_provided_identifiers,
)
def __repr__(self) -> str:
return (
"InventoryEntryCreatedMessage(id=%r, version=%r, created_at=%r, last_modified_at=%r, last_modified_by=%r, created_by=%r, sequence_number=%r, resource=%r, resource_version=%r, type=%r, resource_user_provided_identifiers=%r, inventory_entry=%r)"
% (
self.id,
self.version,
self.created_at,
self.last_modified_at,
self.last_modified_by,
self.created_by,
self.sequence_number,
self.resource,
self.resource_version,
| |
""" cube
Original PyCube written by <NAME>
Based and modified from original version found at:
http://stackoverflow.com/questions/30745703/rotating-a-cube-using-quaternions-in-pyopengl
"""
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import math
import numpy as np
from queue import Queue
from enums import FaceRotation
from geometry import Geometry
from helpers import LittleHelpers
from mathf import Mathf
from quat import *
from tween import *
class State(Enum):
IDLE = 0
TWEENING = 1
class Cube:
def __init__(self, settings, face_rotation_ease_type, sticker_texture_id, size=3):
default_color = (0, 0, 0)
self.padding = settings.cube_padding
self.draw_cubies = settings.cube_draw_cubies
self.draw_sphere = settings.cube_draw_sphere
self.draw_lines = settings.cube_draw_lines
self.line_width = settings.cube_line_width
self.inner_color = LittleHelpers.convert_hex_color_to_floats(settings.cube_inner_color, default_color)
self.sphere_color = LittleHelpers.convert_hex_color_to_floats(settings.cube_sphere_color, default_color)
self.angular_drag = settings.cube_angular_drag
self.scale_drag = settings.cube_scale_drag
self.min_scale = settings.cube_min_scale/size
self.max_scale = settings.cube_max_scale
self.size = size
self.rot_x = 0
self.rot_y = 0
self.accum = (1, 0, 0, 0)
self.scale = 1/(self.size/3)
self.scale_speed = 0
self.sphere_radius = 3
self.sphere_slices = 16
self.sphere_stacks = 16
self.face_rotation_tween_time = settings.cube_face_rotation_tween_time
self.face_rotation_ease_type = face_rotation_ease_type
self.texture_mapping_enabled = settings.texture_mapping_enabled
self.sticker_texture_id = sticker_texture_id
self.geometry = Geometry(self.size)
self.face_colors = (
(0, 154/255, 74/255), # Front: Green
(1, 86/255, 35/255), # Left: Orange
(0, 75/255, 171/255), # Back: Blue
(190/255, 15/255, 56/255), # Right: Red
(1, 1, 1), # Up: White
(1, 210/255, 44/255), # Down: Yellow
)
self.reset()
rx = settings.cube_initial_rotation_x * Mathf.DEG_TO_RAD
ry = settings.cube_initial_rotation_y * Mathf.DEG_TO_RAD
self.apply_rotation(rx, ry, settings.rotate_y_before_x_initially)
# auto-rotation
repeat = True
repetition_count = -1
if settings.cube_auto_rot_x_enabled:
begin = settings.cube_auto_rot_x_begin
end = settings.cube_auto_rot_x_end
time = settings.cube_auto_rot_x_time  # assumed setting name, mirroring the y-axis branch below
jump_start = settings.cube_auto_rot_x_jump_start
ease = Tween.get_ease_type_by_name(settings.cube_auto_rot_x_ease_type)
self.auto_rot_x = Tween()
self.auto_rot_x.tween(begin, end, time, ease, repeat, repetition_count, jump_start)
else:
self.auto_rot_x = False
if settings.cube_auto_rot_y_enabled:
begin = settings.cube_auto_rot_y_begin
end = settings.cube_auto_rot_y_end
time = settings.cube_auto_rot_y_time
jump_start = settings.cube_auto_rot_y_jump_start
ease = Tween.get_ease_type_by_name(settings.cube_auto_rot_y_ease_type)
self.auto_rot_y = Tween()
self.auto_rot_y.tween(begin, end, time, ease, repeat, repetition_count, jump_start)
else:
self.auto_rot_y = False
def reset(self):
self.geometry = Geometry(self.size)
self.geometry.add_padding(self.padding)
self.queued_face_rotations = Queue(0)
self.tween = Tween()
self.state = State.IDLE
self.current_face_rotation = None
def apply_rotation(self, x, y, rotate_y_before_x=False):
qx = normalize(axisangle_to_q((1, 0, 0), x))
qy = normalize(axisangle_to_q((0, 1, 0), y))
if rotate_y_before_x:
self.accum = q_mult(self.accum, qy)
self.accum = q_mult(self.accum, qx)
else:
self.accum = q_mult(self.accum, qx)
self.accum = q_mult(self.accum, qy)
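# Sketch of the quaternion composition used above, with the helpers imported from quat
# (angles are arbitrary example values in radians):
#   qx = normalize(axisangle_to_q((1, 0, 0), 0.3))   # pitch about x
#   qy = normalize(axisangle_to_q((0, 1, 0), 0.5))   # yaw about y
#   combined = q_mult(qx, qy)                        # note: quaternion products do not commute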
def update(self, elapsed_time):
self.rot_x -= abs(self.rot_x) * self.angular_drag * elapsed_time * np.sign(self.rot_x)
self.rot_y -= abs(self.rot_y) * self.angular_drag * elapsed_time * np.sign(self.rot_y)
qx = normalize(axisangle_to_q((1, 0, 0), self.rot_x))
qy = normalize(axisangle_to_q((0, 1, 0), self.rot_y))
self.accum = q_mult(self.accum, qx)
self.accum = q_mult(self.accum, qy)
self.scale_speed -= abs(self.scale_speed) * self.scale_drag * elapsed_time * np.sign(self.scale_speed)
self.scale += self.scale_speed
if self.scale < self.min_scale:
self.scale = self.min_scale
self.scale_speed = 0
if self.scale > self.max_scale:
self.scale = self.max_scale
self.scale_speed = 0
self.update_queue()
self.update_tween(elapsed_time)
self.update_face_tweening()
if self.auto_rot_x: self.auto_rot_x.update(elapsed_time)
if self.auto_rot_y: self.auto_rot_y.update(elapsed_time)
def render(self):
# glPushMatrix()
glLoadMatrixf(q_to_mat4(self.accum))
glScalef(self.scale, self.scale, self.scale)
if self.auto_rot_x:
rot_x = self.auto_rot_x.get_current()
glRotatef(rot_x, 1, 0, 0)
if self.auto_rot_y:
rot_y = self.auto_rot_y.get_current()
glRotatef(rot_y, 0, 1, 0)
if self.draw_sphere: self.render_sphere()
if self.draw_cubies:
if self.texture_mapping_enabled:
self.render_cubies_with_textures()
else:
self.render_cubies()
if self.draw_lines: self.render_lines()
# glPopMatrix()
def add_rotate_x(self, value): self.rot_x += value
def add_rotate_y(self, value): self.rot_y += value
def add_scale(self, value):
self.scale_speed += value
def reset_rotation(self):
self.rot_x = 0
self.rot_y = 0
self.accum = (1, 0, 0, 0)
def reset_scale(self): self.scale = 1
def stop_rotation(self):
self.rot_x = 0
self.rot_y = 0
def append_face_rotation(self, face_rotation):
if type(face_rotation) == FaceRotation:
self.queued_face_rotations.put(face_rotation)
def scramble(self, face_rotations):
theta = math.pi / 2
for face in face_rotations:
self.rotate_face(face, theta)
def map_colors(self, front_color, back_color, left_color, right_color, up_color, down_color):
self.face_colors = (front_color, left_color, back_color, right_color, up_color, down_color)
def update_queue(self):
if self.state == State.TWEENING or self.queued_face_rotations.empty(): return
self.current_face_rotation = self.queued_face_rotations.get_nowait()
self.state = State.TWEENING
self.tween.tween(0, math.pi/2, self.face_rotation_tween_time, self.face_rotation_ease_type)
def update_tween(self, elapsed_time):
if self.state != State.TWEENING: return
if self.tween.is_done():
self.state = State.IDLE
self.current_face_rotation = None
else:
self.tween.update(elapsed_time)
def update_face_tweening(self):
theta = self.tween.get_delta()
self.rotate_face(self.current_face_rotation, theta)
def rotate_face(self, face, theta):
if (face == FaceRotation.FRONT_CW or
face == FaceRotation.BACK_CCW or
face == FaceRotation.LEFT_CCW or
face == FaceRotation.RIGHT_CW or
face == FaceRotation.UP_CW or
face == FaceRotation.DOWN_CCW):
theta *= -1
if face == FaceRotation.FRONT_CW or face == FaceRotation.FRONT_CCW:
self.rotate_front_face(theta)
if face == FaceRotation.BACK_CW or face == FaceRotation.BACK_CCW:
self.rotate_back_face(theta)
if face == FaceRotation.LEFT_CW or face == FaceRotation.LEFT_CCW:
self.rotate_left_face(theta)
if face == FaceRotation.RIGHT_CW or face == FaceRotation.RIGHT_CCW:
self.rotate_right_face(theta)
if face == FaceRotation.UP_CW or face == FaceRotation.UP_CCW:
self.rotate_up_face(theta)
if face == FaceRotation.DOWN_CW or face == FaceRotation.DOWN_CCW:
self.rotate_down_face(theta)
def rotate_front_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[0]:
for piece in pieces:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
#self.geometry.center_pieces[0][i] = z_rot(self.geometry.center_pieces[0][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
if vertex[2] < (self.size-1) - (layer*2):
#if vertex[2] < 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[2] < 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
def rotate_back_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[2]:
for piece in pieces:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
#self.geometry.center_pieces[2][i] = z_rot(self.geometry.center_pieces[2][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
#if vertex[2] > 0:
if vertex[2] > (-self.size+1) + (layer*2): # or vertex[2] > (-self.size-1) + (layer*2):
flag = False
break
if flag:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[2] > 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = z_rot(piece[i], theta)
def rotate_left_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[1]:
for piece in pieces:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
#self.geometry.center_pieces[1][i] = x_rot(self.geometry.center_pieces[1][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
#if vertex[0] > 0:
if vertex[0] > (-self.size+1) + (layer*2):
flag = False
break
if flag:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[0] > 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
def rotate_right_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[3]:
for piece in pieces:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
#self.geometry.center_pieces[3][i] = x_rot(self.geometry.center_pieces[3][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
#if vertex[0] < 0:
if vertex[0] < (self.size-1) - (layer*2):
flag = False
break
if flag:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[0] < 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = x_rot(piece[i], theta)
def rotate_up_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[4]:
for piece in pieces:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
#self.geometry.center_pieces[4][i] = y_rot(self.geometry.center_pieces[4][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
#if vertex[1] < 0:
if vertex[1] < (self.size-1) - (layer*2):
#if vertex[1] < self.size - layer*2:
flag = False
break
if flag:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[1] < 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
def rotate_down_face(self, theta, layer=1):
for pieces in self.geometry.center_pieces[5]:
for piece in pieces:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
#self.geometry.center_pieces[5][i] = y_rot(self.geometry.center_pieces[5][i], theta)
for axis in self.geometry.edge_pieces:
for pieces in axis:
for piece in pieces:
flag = True
for vertex in piece:
#if vertex[1] > 0:
if vertex[1] > (-self.size+1) + (layer*2):
#if vertex[1] > -self.size + layer*2:
flag = False
break
if flag:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
for piece in self.geometry.corner_pieces:
flag = True
for vertex in piece:
if vertex[1] > 0:
flag = False
break
if flag:
for i in range(8):
piece[i] = y_rot(piece[i], theta)
def render_sphere(self):
glColor3f(self.sphere_color[0], self.sphere_color[1], self.sphere_color[2])
glutSolidSphere(self.sphere_radius, self.sphere_slices, self.sphere_stacks)
def render_axes(self):
glLineWidth(1)
glBegin(GL_LINES)
for color, axis in zip(self.geometry.axis_colors, self.geometry.axes):
glColor3f(color[0], color[1], color[2])
for point in axis:
p = self.geometry.axis_verts[point]
glVertex3f(p[0], p[1], p[2])
test examples
'''
return self.sess.run(self.preds, feed_dict=self.all_test_feed_dict)
def print_model_eval(self):
'''
print the evaluation of the model related information, such as loss, gradients, norm of parameters, etc.
'''
params_val = self.sess.run(self.params)
if self.mini_batch == True:
grad_loss_val, loss_no_reg_val, loss_val = self.minibatch_mean_eval(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss],
self.data_sets.train)
test_loss_val = self.minibatch_mean_eval(
[self.loss_no_reg],
self.data_sets.test)
else:
grad_loss_val, loss_no_reg_val, loss_val = self.sess.run(
[self.grad_total_loss_op, self.loss_no_reg, self.total_loss],
feed_dict=self.all_train_feed_dict)
test_loss_val = self.sess.run(
[self.loss_no_reg],
feed_dict=self.all_test_feed_dict)
print('Train loss (w reg) on all data: %s' % loss_val)
print('Train loss (w/o reg) on all data: %s' % loss_no_reg_val)
print('Test loss (w/o reg) on all data: %s' % test_loss_val)
print('Norm of the mean of gradients: %s' % np.linalg.norm(
np.concatenate([np.reshape(x, [-1, 1]) for x in grad_loss_val])))
print('Norm of the params: %s' % np.linalg.norm(np.concatenate([np.reshape(x, [-1, 1]) for x in params_val])))
def retrain(self, num_steps, feed_dict):
'''
retrain the model with num_steps iterations
num_steps: the number of iterations
feed_dict: the training examples
'''
for step in range(num_steps):
self.sess.run(self.train_op, feed_dict=feed_dict)
def update_learning_rate(self, step):
'''
update the learning rate
step: the current training step; used to determine the epoch and hence the decay multiplier
'''
if self.mini_batch:
assert self.num_train_examples % self.batch_size == 0
num_steps_in_epoch = self.num_train_examples // self.batch_size
else:
num_steps_in_epoch = 1
epoch = step // num_steps_in_epoch
multiplier = 1
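# Staircase schedule (as implemented below): full learning rate until
# decay_epochs[0], then 10x smaller, then 100x smaller after decay_epochs[1].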
if epoch < self.decay_epochs[0]:
multiplier = 1
elif epoch < self.decay_epochs[1]:
multiplier = 0.1
else:
multiplier = 0.01
self.sess.run(
self.update_learning_rate_op,
feed_dict={self.learning_rate_placeholder: multiplier * self.initial_learning_rate})
def train(self, num_steps,
iter_to_switch_to_batch=20000,
iter_to_switch_to_sgd=40000,
save_checkpoints=True, verbose=True,
):
"""
Trains a model for a specified number of steps.
num_steps: the number of iterations
iter_to_switch_to_batch: the number of iterations to switch to batch training
iter_to_switch_to_sgd: the number of iterations to switch to sgd optimizer
save_checkpoints: Whether to save the model at the checkpoints
verbose: whether to print the message during the training
"""
if verbose: print('Training for %s steps' % num_steps)
self.num_train_step = num_steps
self.iter_to_switch_to_batch = iter_to_switch_to_batch
self.iter_to_switch_to_sgd = iter_to_switch_to_sgd
sess = self.sess
# Tensorboard
# train_writer = tf.summary.FileWriter('./logs/{}/train'.format(datetime.datetime.now().strftime("%Y_%m_%d_%H_%M")),
# sess.graph)
# tf.summary.scalar("loss", self.total_loss)
# merged = tf.summary.merge_all()
org_loss = -100
err = []
for step in range(num_steps):
self.update_learning_rate(step)
start_time = time.time()
if step < iter_to_switch_to_batch:
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train)
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
# loss_val = sess.run([self.total_loss], feed_dict=feed_dict)
elif step < iter_to_switch_to_sgd:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
else:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_sgd_op, self.total_loss], feed_dict=feed_dict)
duration = time.time() - start_time
# train_writer.add_summary(summary, step)
if (step + 1) % 1000 == 0:
# Print status to stdout.
if verbose:
print('Step %d: loss = %.8f (%.3f sec)' % (step, loss_val, duration))
# if(abs(loss_val - org_loss) < epsilon):
# break
# org_loss = loss_val
err.append((step, loss_val))
# Save a checkpoint and evaluate the model periodically.
if (step + 1) % 10000 == 0 or (step + 1) == num_steps:
if save_checkpoints: self.saver.save(sess, self.checkpoint_file, global_step=step)
if verbose: self.print_model_eval()
return err
# train_writer.flush()
def load_checkpoint(self, iter_to_load, do_checks=True):
'''
load the model at the checkpoint
iter_to_load: the number of iteration where the model is saved
do_checks: print the information of the model after loading
'''
checkpoint_to_load = "%s_%s" % (self.checkpoint_file, iter_to_load)
self.saver.restore(self.sess, checkpoint_to_load)
if do_checks:
print('Model %s loaded. Sanity checks ---' % checkpoint_to_load)
self.print_model_eval()
def save(self, file_name, do_checks=False):
'''
save the model at the checkpoint
file_name: the name of the file with which the model is saved
do_checks: print the information of the model after loading
'''
self.saver.save(self.sess, file_name)
if do_checks:
print('Model %s saved. Sanity checks ---' % file_name)
self.print_model_eval()
def restore(self, file_name, do_checks=False):
'''
load the model at the checkpoint
file_name: the name of the file with which the model is saved
do_checks: print the information of the model after loading
'''
self.saver.restore(self.sess, file_name)
if do_checks:
print('Model %s loaded. Sanity checks ---' % file_name)
self.print_model_eval()
def restore_train(self, file_name, init_step,
iter_to_switch_to_batch=20000,
iter_to_switch_to_sgd=40000,
):
"""
Trains a model for a specified number of steps.
file_name: the name of the file with which the model is saved
init_step: the step at which training resumes; determines the learning rate and whether mini-batch, full-batch, or SGD updates are used
iter_to_switch_to_batch: the number of iterations to switch to batch training
iter_to_switch_to_sgd: the number of iterations to switch to sgd optimizer
"""
self.num_train_step = init_step
self.iter_to_switch_to_batch = iter_to_switch_to_batch
self.iter_to_switch_to_sgd = iter_to_switch_to_sgd
sess = self.sess
org_loss = -100
err = []
self.update_learning_rate(init_step)
start_time = time.time()
if init_step < iter_to_switch_to_batch:
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train)
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
# loss_val = sess.run([self.total_loss], feed_dict=feed_dict)
elif init_step < iter_to_switch_to_sgd:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_op, self.total_loss], feed_dict=feed_dict)
else:
feed_dict = self.all_train_feed_dict
_, loss_val = sess.run([self.train_sgd_op, self.total_loss], feed_dict=feed_dict)
duration = time.time() - start_time
self.load_checkpoint(file_name, do_checks=True)
# train_writer.add_summary(summary, step)
print('initial loss = %.8f (%.3f sec)' % (loss_val, duration))
def get_train_op(self, total_loss, global_step, learning_rate):
"""
Return train_op
total_loss: the loss function to be optimized
global_step: the global step for the optimizer
learning_rate: the learning rate of the adam optimizer
"""
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step, )
return train_op
def get_train_sgd_op(self, total_loss, global_step, learning_rate=0.001):
"""
Return train_sgd_op
total_loss: the loss function to be optimized
global_step: the global step for the optimizer
learning_rate: the learning rate of the SGD optimizer
"""
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.minimize(total_loss, global_step=global_step)
return train_op
def get_accuracy_op(self, logits, labels):
"""Evaluate the quality of the logits at predicting the label.
Args:
logits: Logits tensor, float - [batch_size, NUM_CLASSES].
labels: Labels tensor, int32 - [batch_size], with values in the
range [0, NUM_CLASSES).
Returns:
A scalar int32 tensor with the number of examples (out of batch_size)
that were predicted correctly.
"""
# correct = tf.nn.in_top_k(logits, labels, 1)
# return tf.reduce_sum(tf.cast(correct, tf.int32)) / tf.shape(labels)[0]
return np.NaN
def loss(self, yhat, y):
'''
the squared error between yhat and y; NaN entries are masked out before the mean is taken.
yhat: the prediction of the label
y: the label
'''
indiv_loss_no_reg = tf.squared_difference(yhat, y, name='indiv_loss')
# indiv_loss_no_reg = tf.Print(indiv_loss_no_reg, [yhat[0], y[0], indiv_loss_no_reg[0]])
# neglect nans when do the average
loss_no_reg = tf.reduce_mean(tf.boolean_mask(indiv_loss_no_reg,
tf.logical_not(tf.is_nan(indiv_loss_no_reg))),
name='loss_no_reg')
# loss_no_reg = tf.Print(loss_no_reg, [loss_no_reg])
tf.add_to_collection('losses', loss_no_reg)
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
# total_loss = tf.Print(total_loss, [yhat[0], y[0], indiv_loss_no_reg[0], loss_no_reg, total_loss])
return total_loss, loss_no_reg, indiv_loss_no_reg
def adversarial_loss(self, logits, labels):
return 0, 0
def update_feed_dict_with_v_placeholder(self, feed_dict, vec):
for pl_block, vec_block in zip(self.v_placeholder, vec):
shp = pl_block.get_shape().as_list()
shp = [-1 if x is None else x for x in shp]
feed_dict[pl_block] = np.reshape(vec_block, shp)
return feed_dict
def get_inverse_hvp(self, v, approx_type='cg', approx_params=None, verbose=True):
assert approx_type in ['cg', 'lissa']
if approx_type == 'lissa':
return self.get_inverse_hvp_lissa(v, **approx_params)
elif approx_type == 'cg':
return self.get_inverse_hvp_cg(v, verbose)
def get_inverse_hvp_lissa(self, v,
batch_size=None,
scale=10, damping=0.0, num_samples=1, recursion_depth=10000):
"""
This uses mini-batching; uncomment code for the single sample case.
"""
inverse_hvp = None
print_iter = recursion_depth / 10
for i in range(num_samples):
# samples = np.random.choice(self.num_train_examples, size=recursion_depth)
cur_estimate = v
for j in range(recursion_depth):
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)
hessian_vector_val = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
cur_estimate = [a + (1 - damping) * b - c / scale
for (a, b, c) in zip(v, cur_estimate, hessian_vector_val)]
# Update: v + (I - Hessian_at_x) * cur_estimate
if (j % print_iter == 0) or (j == recursion_depth - 1):
print(
"Recursion at depth %s: norm is %.8lf" % (j, np.linalg.norm(self.list_to_vec((cur_estimate)))))
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, cur_estimate)
if inverse_hvp is None:
inverse_hvp = [b / scale for b in cur_estimate]
else:
inverse_hvp = [a + b / scale for (a, b) in zip(inverse_hvp, cur_estimate)]
inverse_hvp = [a / num_samples for a in inverse_hvp]
return inverse_hvp
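# The recursion above is the LISSA / Neumann-series estimator: with r_0 = v and
# r_{j+1} = v + (I - H/scale) r_j, the iterates converge to scale * H^{-1} v
# whenever the eigenvalues of H/scale lie in (0, 2), which is why the final
# estimate is divided by `scale` and averaged over `num_samples` runs.
# `damping` effectively adds a multiple of the identity to H for stability.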
def minibatch_hessian_vector_val(self, v):
num_examples = self.num_train_examples
if self.mini_batch == True:
batch_size = 100
assert num_examples % batch_size == 0
else:
batch_size = self.num_train_examples
num_iter = int(num_examples / batch_size)
self.reset_datasets()
hessian_vector_val = None
for i in range(num_iter):
feed_dict = self.fill_feed_dict_with_batch(self.data_sets.train, batch_size=batch_size)
# Can optimize this
feed_dict = self.update_feed_dict_with_v_placeholder(feed_dict, [v])
hessian_vector_val_temp = self.sess.run(self.hessian_vector, feed_dict=feed_dict)
if hessian_vector_val is None:
hessian_vector_val = [b / float(num_iter) for b in hessian_vector_val_temp]
else:
hessian_vector_val = [a + (b / float(num_iter)) for (a, b) in
zip(hessian_vector_val, hessian_vector_val_temp)]
hessian_vector_val = [a + self.damping * b for (a, b) in zip(hessian_vector_val, v)]
return hessian_vector_val
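# --- Illustrative sketch (not part of the class above) -----------------------
# A minimal numpy demo of the LISSA recursion used in get_inverse_hvp_lissa,
# on a small, well-conditioned matrix. All names below (lissa_inverse_hvp_demo,
# H, v) are made up for the sketch; it only mirrors the update rule.
import numpy as np


def lissa_inverse_hvp_demo(H, v, scale=10.0, damping=0.0, depth=2000):
    # Same update as the class method: cur <- v + (1 - damping) * cur - (H @ cur) / scale
    cur = v.copy()
    for _ in range(depth):
        cur = v + (1.0 - damping) * cur - (H @ cur) / scale
    return cur / scale


if __name__ == "__main__":
    H = np.diag([1.0, 2.0, 3.0, 4.0, 5.0])   # stand-in for a well-conditioned Hessian
    v = np.array([1.0, -1.0, 0.5, 2.0, 0.0])
    approx = lissa_inverse_hvp_demo(H, v)
    exact = np.linalg.solve(H, v)
    print(np.max(np.abs(approx - exact)))    # should print a value close to 0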
arc tangent of two data arrays `x` and `y`. It is similar to calculating the arc tangent of `y/x`,
except that the signs of both arguments are used to determine the quadrant of the result. Works on radians only.
The no-data value np.nan is passed through and therefore gets propagated if any of the arguments is null.
Parameters
----------
y : xr.DataArray
Numbers to be used as dividend.
x : xr.DataArray
Numbers to be used as divisor.
Returns
-------
xr.DataArray :
The computed angles in radians.
"""
arct = xr.ufuncs.arctan2(y, x)
return keep_attrs(x, y, arct)
@staticmethod
def exec_da():
pass
########################################################################################################################
# linear_scale_range Process
########################################################################################################################
@process
def linear_scale_range():
"""
Returns class instance of `LinearScaleRange`.
For more details, please have a look at the implementations inside `LinearScaleRange`.
Returns
-------
LinearScaleRange :
Class instance implementing all 'linear_scale_range' processes.
"""
return LinearScaleRange()
class LinearScaleRange:
"""
Class implementing all 'linear_scale_range' processes.
"""
@staticmethod
def exec_num(x, inputMin, inputMax, outputMin=0., outputMax=1.):
"""
Performs a linear transformation between the input and output range. The underlying formula is:
`((x - input_min) / (input_max - input_min)) * (output_max - output_min) + output_min`.
Potential use cases include scaling values to the 8-bit range (0 - 255) often used for numeric representation of
values in one of the channels of the RGB colour model or calculating percentages (0 - 100).
The no-data value None is passed through and therefore gets propagated.
Parameters
----------
x : int or float
A number to transform.
input_min : int or float
Minimum value the input can obtain.
input_max : int or float
Maximum value the input can obtain.
output_min : int or float, optional
Minimum value of the desired output range (default is 0.).
output_max : int or float, optional
Maximum value of the desired output range (default is 1.).
Returns
-------
float :
The transformed number.
"""
return LinearScaleRange.exec_np(x, inputMin, inputMax,
outputMin=outputMin,
outputMax=outputMax) if x is not None else x
@staticmethod
def exec_np(x, inputMin, inputMax, outputMin=0., outputMax=1.):
"""
Performs a linear transformation between the input and output range. The underlying formula is:
`((x - input_min) / (input_max - input_min)) * (output_max - output_min) + output_min`.
Potential use cases include scaling values to the 8-bit range (0 - 255) often used for numeric representation of
values in one of the channels of the RGB colour model or calculating percentages (0 - 100).
The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : np.array
Numbers to transform.
input_min : int or float
Minimum value the input can obtain.
input_max : int or float
Maximum value the input can obtain.
output_min : int or float, optional
Minimum value of the desired output range (default is 0.).
output_max : int or float, optional
Maximum value of the desired output range (default is 1.).
Returns
-------
np.array :
The transformed numbers.
"""
return ((x - inputMin) / (inputMax - inputMin)) * (outputMax - outputMin) + outputMin
@staticmethod
def exec_xar(x, inputMin, inputMax, outputMin=0., outputMax=1.):
"""
Performs a linear transformation between the input and output range. The underlying formula is:
`((x - input_min) / (input_max - input_min)) * (output_max - output_min) + output_min`.
Potential use cases include scaling values to the 8-bit range (0 - 255) often used for numeric representation of
values in one of the channels of the RGB colour model or calculating percentages (0 - 100).
The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : xr.DataArray
Numbers to transform.
input_min : int or float
Minimum value the input can obtain.
input_max : int or float
Maximum value the input can obtain.
output_min : int or float, optional
Minimum value of the desired output range (default is 0.).
output_max : int or float, optional
Maximum value of the desired output range (default is 1.).
Returns
-------
xr.DataArray :
The transformed numbers.
"""
lsr = ((x - inputMin) / (inputMax - inputMin)) * (outputMax - outputMin) + outputMin
lsr.attrs = x.attrs
return lsr
@staticmethod
def exec_da():
pass
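# Example (illustrative): scaling reflectance values in [0, 1] to the 8-bit
# range used for RGB display:
#   LinearScaleRange.exec_np(np.array([0.0, 0.5, 1.0]), 0.0, 1.0, 0.0, 255.0)
#   -> array([  0. , 127.5, 255. ])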
########################################################################################################################
# Scale Process
########################################################################################################################
@process
def scale():
"""
Returns class instance of `Scale`.
For more details, please have a look at the implementations inside `Scale`.
Returns
-------
Scale :
Class instance implementing all 'scale' processes.
"""
return Scale()
class Scale:
"""
Class implementing all 'scale' processes.
"""
@staticmethod
def exec_num(x, factor=1.):
"""
Scales `x` with a multiplicand `factor`.
The no-data value None is passed through and therefore gets propagated.
Parameters
----------
x : int or float
A number to scale.
factor : int or float, optional
The scale factor/multiplicand (default is 1.).
Returns
-------
float :
The scaled number.
"""
return x*factor if x is not None else x
@staticmethod
def exec_np(x, factor=1.):
"""
Scales `x` with a multiplicand `factor`.
The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : np.array
A number to scale.
factor : int or float, optional
The scale factor/multiplicand (default is 1.).
Returns
-------
np.array :
The scaled numbers.
"""
return x*factor
@staticmethod
def exec_xar(x, factor=1.):
"""
Scales `x` with a multiplicand `factor`.
The no-data value np.nan is passed through and therefore gets propagated.
Parameters
----------
x : xr.DataArray
A number to scale.
factor : int or float, optional
The scale factor/multiplicand (default is 1.).
Returns
-------
xr.DataArray :
The scaled numbers.
"""
s = x*factor
s.attrs = x.attrs
return s
@staticmethod
def exec_da():
pass
########################################################################################################################
# Mod Process
########################################################################################################################
@process
def mod():
"""
Returns class instance of `Mod`.
For more details, please have a look at the implementations inside `Mod`.
Returns
-------
Mod :
Class instance implementing all 'mod' processes.
"""
return Mod()
class Mod:
"""
Class implementing all 'mod' processes.
"""
@staticmethod
def exec_num(x, y):
"""
Remainder after division of `x` by `y`. The result of a modulo operation has the sign of the divisor.
The no-data value None is passed through and therefore gets propagated if any of the arguments is None.
Parameters
----------
x : int or float
A number to be used as dividend.
y : int or float
A number to be used as divisor.
Returns
-------
float :
The remainder after division.
"""
return x % y if x is not None and y is not None else None
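# Note: Python's % and np.mod both give the remainder the sign of the divisor,
# matching the process definition above (e.g. -5 % 3 == 1 and 5 % -3 == -1).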
@staticmethod
def exec_np(x, y):
"""
Remainder after division of `x` by `y`. The result of a modulo operation has the sign of the divisor.
The no-data value None is passed through and therefore gets propagated if any of the arguments is None.
Parameters
----------
x : np.array
Numbers to be used as dividend.
y : np.array
Numbers to be used as divisor.
Returns
-------
np.array :
The remainders after division.
"""
return np.mod(x, y)
@staticmethod
def exec_xar(x, y):
"""
Remainder after division of `x` by `y`. The result of a modulo operation has the sign of the divisor.
The no-data value None is passed through and therefore gets propagated if any of the arguments is None.
Parameters
----------
x : xr.DataArray
Numbers to be used as dividend.
y : xr.DataArray
Numbers to be used as divisor.
Returns
-------
xr.DataArray :
The remainders after division.
"""
if x is None or y is None:
return None
m = x % y
return keep_attrs(x, y, m)
@staticmethod
def exec_da():
pass
########################################################################################################################
# Absolute Process
########################################################################################################################
@process
def absolute():
"""
Returns class instance of `Absolute`.
For more details, please have a look at the implementations inside `Absolute`.
Returns
-------
Mod :
Class instance implementing all 'absolute' processes.
"""
return Absolute()
class Absolute:
"""
Class implementing all 'absolute' processes.
"""
@staticmethod
def exec_num(x):
"""
Computes the absolute value of a real number `x`, which is the "unsigned" portion of `x` and
often denoted as `|x|`. The no-data value None is passed through and therefore gets propagated.
Parameters
----------
x : int or float
A number.
Returns
-------
int or float :
The computed absolute value.
"""
return abs(x) if x is not None else x
number of points to sample at between the starting and stopping phase.
:param num_shots: The number of shots to average over for each data point.
:return: pandas DataFrame
'''
cz_experiment = []
rz_qubit = [] # this is the qubit to which the RZ is applied
for edge in edges:
qubit, other_qubit = edge
# first qubit gets RZ
cz_experiment.append({
'Edge': tuple(edge),
'Rz_qubit': qubit,
'Program': generate_cz_phase_ramsey_program(qubit, other_qubit, num_shots),
'Start_phase': start_phase,
'Stop_phase': stop_phase,
'Num_points': num_points,
'Num_shots': num_shots,
})
# second qubit gets RZ
cz_experiment.append({
'Edge': tuple(edge),
'Rz_qubit': other_qubit,
'Program': generate_cz_phase_ramsey_program(other_qubit, qubit, num_shots),
'Start_phase': start_phase,
'Stop_phase': stop_phase,
'Num_points': num_points,
'Num_shots': num_shots,
})
return pd.DataFrame(cz_experiment)
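# Each edge contributes two rows: one measuring the RZ incurred on the first
# qubit and one on the second, so a device graph with E edges yields a
# 2*E-row DataFrame.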
def acquire_data_cz_phase_ramsey(qc: QuantumComputer,
cz_experiment: pd.DataFrame,
filename: str = None) -> pd.DataFrame:
"""
Execute experiments to measure the RZ incurred as a result of a CZ gate.
:param qc: The QuantumComputer on which to run the experiments
:param cz_experiment: pandas DataFrame
:param filename: The name of the file to write JSON-serialized results to
:return: pandas DataFrame
"""
results = []
for index, row in cz_experiment.iterrows():
parametric_ramsey_prog = row['Program']
edge = row['Edge']
rz_qb = row['Rz_qubit']
start_phase = row['Start_phase']
stop_phase = row['Stop_phase']
num_points = row['Num_points']
num_shots = row['Num_shots']
binary = compile_parametric_program(qc, parametric_ramsey_prog, num_shots=num_shots)
qc.qam.load(binary)
for theta in np.linspace(start_phase, stop_phase, num_points):
qc.qam.write_memory(region_name='theta', value=theta)
qc.qam.run()
qc.qam.wait()
bitstrings = qc.qam.read_from_memory_region(region_name="ro")
avg = np.mean(bitstrings[:, 0])
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Phase': theta,
'Num_bitstrings': len(bitstrings),
'Average': float(avg),
})
if filename:
pd.DataFrame(results).to_json(filename)
return pd.DataFrame(results)
def estimate_cz_phase_ramsey(df: pd.DataFrame) -> pd.DataFrame:
"""
Estimate CZ phase ramsey experimental data.
:param df: Experimental results to fit a sinusoid to.
:return: pandas DataFrame with one row of fit results per (edge, qubit).
"""
results = []
edges = df['Edge'].unique()
for id_row, edge in enumerate(edges):
for id_col, qubit in enumerate(edge):
qubit_df = df[(df['Rz_qubit'] == qubit) & (df['Edge'] == edge)].sort_values('Phase')
phases = qubit_df['Phase']
prob_of_one = qubit_df['Average']
rz_qb = qubit_df['Rz_qubit'].values[0]
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(phases, prob_of_one)
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Angle': fit_params[1],
'Prob_of_one': fit_params[2],
'Fit_params': fit_params,
'Fit_params_errs': fit_params_errs,
'Message': None,
})
except RuntimeError:
print(f"Could not fit to experimental data for edge {edge}")
results.append({
'Edge': edge,
'Rz_qubit': rz_qb,
'Angle': None,
'Prob_of_one': None,
'Fit_params': None,
'Fit_params_errs': None,
'Message': 'Could not fit to experimental data for edge ' + str(edge),
})
return pd.DataFrame(results)
def plot_cz_phase_estimate_over_data(df: pd.DataFrame,
filename: str = None) -> None:
"""
Plot Ramsey experimental data, the fitted sinusoid, and the maximum of that sinusoid.
:param df: Experimental results to plot and fit a sinusoid to.
:return: None
"""
edges = df['Edge'].unique()
if len(edges) == 1:
# handle the single-edge case; the plot will simply have an empty second row
# if you don't do this you get `axes.shape = (2,)`
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(24, 30))
else:
fig, axes = plt.subplots(nrows=len(edges), ncols=2, figsize=(24, 10 * len(edges)))
for id_row, edge in enumerate(edges):
for id_col, qubit in enumerate(edge):
qubit_df = df[(df['Rz_qubit'] == qubit) & (df['Edge'] == edge)].sort_values('Phase')
phases = qubit_df['Phase']
prob_of_one = qubit_df['Average']
# plot raw data
axes[id_row, id_col].plot(phases, prob_of_one, 'o',
label=f"qubit{qubit} CZ Ramsey data")
try:
# fit to sinusoid
fit_params, fit_params_errs = fit_to_sinusoidal_waveform(phases,
prob_of_one)
except RuntimeError:
print(f"Could not fit to experimental data for qubit {qubit}")
else:
# find max excited state visibility (ESV) and propagate error from fit params
max_ESV, max_ESV_err = get_peak_from_fit_params(fit_params, fit_params_errs)
# overlay fitted curve and vertical line at maximum ESV
axes[id_row, id_col].plot(phases, sinusoidal_waveform(phases, *fit_params),
label=f"QC{qubit} fitted line")
axes[id_row, id_col].axvline(max_ESV,
label=f"QC{qubit} max ESV={max_ESV:.3f}+/-{max_ESV_err:.3f} rad")
axes[id_row, id_col].set_xlabel("Phase on second +X/2 gate [rad]")
axes[id_row, id_col].set_ylabel(r"Pr($|1\rangle$)")
axes[id_row, id_col].set_title(f"CZ Phase Ramsey fringes on QC{qubit}\n"
f"due to CZ_{edge[0]}_{edge[1]} application")
axes[id_row, id_col].legend(loc='best')
if filename is not None:
plt.savefig(filename)
plt.show()
# ==================================================================================================
# Fits and so forth
# ==================================================================================================
def exponential_decay_curve(t: Union[float, np.ndarray],
amplitude: float,
time_decay_constant: float,
t_offset: float = 0.0) -> Union[float, np.ndarray]:
"""
Calculate exponential decay at a series of points.
:param t: The independent variable with respect to which decay is calculated.
:param amplitude: The amplitude of the decay curve.
:param time_decay_constant: The time decay constant - in this case T1 - of the decay curve.
:param t_offset: The time offset of the curve, assumed to be 0.0.
:return: The exponential decay at the point(s) in time.
"""
return amplitude * np.exp(-1 * (t - t_offset) / time_decay_constant)
def fit_to_exponential_decay_curve(x_data: np.ndarray,
y_data: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Fit experimental data to exponential decay curve.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:return: Arrays of fitted decay curve parameters and their errors
"""
params, params_covariance = optimize.curve_fit(exponential_decay_curve,
x_data, y_data,
p0=[1.0, 15e-6, 0.0])
# parameter error extraction from
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
params_errs = np.sqrt(np.diag(params_covariance))
return params, params_errs
def sinusoidal_waveform(x: float,
amplitude: float,
baseline: float,
frequency: float,
x_offset: float) -> Union[float, np.ndarray]:
"""
Calculate sinusoidal response at a series of points.
:param x: The independent variable with respect to which the sinusoidal response is calculated.
:param amplitude: The amplitude of the sinusoid.
:param baseline: The baseline of the sinusoid.
:param frequency: The frequency of the sinusoid.
:param x_offset: The x offset of the sinusoid.
:return: The sinusoidal response at the given phases(s).
"""
return amplitude * np.sin(frequency * x + x_offset) + baseline
def fit_to_sinusoidal_waveform(x_data: np.ndarray,
y_data: List[float],
displayflag: bool = False,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Fit experimental data to sinusoid.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:param displayflag: If True displays results from scipy curve fit analysis.
:return: Arrays of fitted decay curve parameters and their standard deviations
"""
params, params_covariance = optimize.curve_fit(sinusoidal_waveform, x_data, y_data,
p0=[0.5, 0.5, 1.0, np.pi / 2])
# parameter error extraction from
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
params_errs = np.sqrt(np.diag(params_covariance))
# interleave params and params_errs
print_params = []
for idx in range(len(params)):
print_params.append(params[idx])
print_params.append(params_errs[idx])
if displayflag:
print("scipy curve fitting analysis returned\n"
"amplitude:\t{:.5f} +/- {:.5f}\n"
"baseline:\t{:.5f} +/- {:.5f}\n"
"frequency:\t{:.5f} +/- {:.5f}\n"
"x offset:\t{:.5f} +/- {:.5f}".format(*print_params))
return params, params_errs
def get_peak_from_fit_params(fit_params: np.ndarray,
fit_params_errs: np.ndarray) -> Tuple[float, float]:
"""
Extract peak from the fit parameters returned by scipy.optimize.curve_fit.
:param fit_params: fit parameters out of scipy.optimize.curve_fit
:param fit_params_errs: standard deviations on the fit parameters from scipy.optimize.curve_fit
:return: The phase corresponding the to the maximum excited state visibility and its st. dev.
"""
# TODO: do away with hard-coded indices for fit params
x0 = fit_params[-1]
x0_err = fit_params_errs[-1]
freq = fit_params[-2]
freq_err = fit_params_errs[-2]
print("propagating error using x_0 = {} +/- {} and freq = {} +/- {}".format(x0, x0_err,
freq, freq_err))
# find the phase corresponding to maximum excited state visibility (ESV) using the fit params
max_ESV = (np.pi / 2 - x0) / freq
# max_ESV_err obtained by applying error propagation formula to max_ESV
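# Explicitly, with f(x0, freq) = (pi/2 - x0) / freq:
#   df/dx0   = -1 / freq
#   df/dfreq = -(pi/2 - x0) / freq**2
# so sigma_f**2 = (x0_err / freq)**2 + ((pi/2 - x0) * freq_err / freq**2)**2,
# which is the expression used for max_ESV_err below.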
max_ESV_err = np.sqrt((x0_err / freq) ** 2 + ((np.pi / 2 - x0) * (freq_err / freq ** 2)) ** 2)
print("\nmaximum excited state visibility observed at x = {} +/- {}".format(max_ESV,
max_ESV_err))
return max_ESV, max_ESV_err
def exponentially_decaying_sinusoidal_curve(t: Union[float, np.ndarray],
amplitude: float,
time_decay_constant: float,
frequency: float,
baseline: float,
sin_t_offset: float = 0.0) -> Union[float, np.ndarray]:
"""
Calculate exponentially decaying sinusoid at a series of points.
:param t: The independent variable with respect to which decay is calculated.
:param amplitude: The amplitude of the decay curve.
:param time_decay_constant: The time decay constant - in this case T2 - of the decay curve.
:param frequency: The frequency to fit to the Ramsey fringes.
:param baseline: The baseline of the Ramsey fringes.
:param sin_t_offset: The time offset of the sinusoidal curve, assumed to be 0.0.
:return: The exponentially decaying sinusoid evaluated at the point(s) in time.
"""
return amplitude * np.exp(-1 * t / time_decay_constant) * \
np.sin(frequency * (t - sin_t_offset)) + baseline
def fit_to_exponentially_decaying_sinusoidal_curve(x_data: np.ndarray,
y_data: np.ndarray,
detuning: float = 5e6) -> Tuple[np.ndarray,
np.ndarray]:
"""
Fit experimental data to exponential decay curve.
:param x_data: Independent data to fit to.
:param y_data: Experimental, dependent data to fit to.
:param detuning: Detuning frequency used in experiment creation.
:return: Arrays of fitted decay curve parameters and their errors
"""
params, params_covariance = optimize.curve_fit(exponentially_decaying_sinusoidal_curve,
#!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import argparse
import asyncio
import json
import logging
import os
import shutil
import socket
import sys
import time
from io import BytesIO
from pathlib import Path
from tarfile import TarFile
from typing import Any, Callable, Optional
try:
# normally in a package
from forest import pghelp, utils
except ImportError:
# maybe we're local?
try:
import pghelp # type: ignore
import utils # type: ignore
except ImportError:
# i wasn't asking
sys.path.append("forest")
sys.path.append("..")
import pghelp # type: ignore # pylint: disable=ungrouped-imports
import utils # type: ignore # pylint: disable=ungrouped-imports
if utils.get_secret("MIGRATE"):
get_datastore = "SELECT account, datastore FROM {self.table} WHERE id=$1"
else:
get_datastore = "SELECT datastore FROM {self.table} WHERE id=$1"
class DatastoreError(Exception):
pass
AccountPGExpressions = pghelp.PGExpressions(
table="signal_accounts",
# rename="ALTAR TABLE IF EXISTS prod_users RENAME TO {self.table}",
migrate="ALTER TABLE IF EXISTS {self.table} ADD IF NOT EXISTS datastore BYTEA, ADD IF NOT EXISTS notes TEXT",
create_table="CREATE TABLE IF NOT EXISTS {self.table} \
(id TEXT PRIMARY KEY, \
datastore BYTEA, \
last_update_ms BIGINT, \
last_claim_ms BIGINT, \
active_node_name TEXT, \
notes TEXT);",
is_registered="SELECT datastore is not null as registered FROM {self.table} WHERE id=$1",
get_datastore=get_datastore,
get_claim="SELECT active_node_name FROM {self.table} WHERE id=$1",
mark_account_claimed="UPDATE {self.table} \
SET active_node_name = $2, \
last_claim_ms = (extract(epoch from now()) * 1000) \
WHERE id=$1;",
mark_account_freed="UPDATE {self.table} SET last_claim_ms = 0, \
active_node_name = NULL WHERE id=$1;",
get_free_account="SELECT id, datastore FROM {self.table} \
WHERE active_node_name IS NULL \
AND last_claim_ms = 0 \
LIMIT 1;",
upload="INSERT INTO {self.table} (id, datastore, last_update_ms) \
VALUES($1, $2, (extract(epoch from now()) * 1000)) \
ON CONFLICT (id) DO UPDATE SET \
datastore = $2, last_update_ms = EXCLUDED.last_update_ms;",
free_accounts_not_updated_in_the_last_hour="UPDATE {self.table} \
SET last_claim_ms = 0, active_node_name = NULL \
WHERE last_update_ms < ((extract(epoch from now())-3600) * 1000);",
get_timestamp="select last_update_ms from {self.table} where id=$1",
)
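# Claim lifecycle (as used below): a row with active_node_name NULL and
# last_claim_ms = 0 is free; download() marks it claimed with the local
# hostname, upload() refreshes last_update_ms, and mark_freed() (or the hourly
# sweep in free_accounts_not_updated_in_the_last_hour) releases it again.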
def get_account_interface() -> pghelp.PGInterface:
return pghelp.PGInterface(
query_strings=AccountPGExpressions,
database=utils.get_secret("DATABASE_URL"),
)
class SignalDatastore:
"""
Download, claim, mount, and sync a signal datastore
"""
def __init__(self, number: str):
self.account_interface = get_account_interface()
formatted_number = utils.signal_format(number)
if isinstance(formatted_number, str):
self.number: str = formatted_number
else:
raise Exception("not a valid number")
logging.info("SignalDatastore number is %s", self.number)
self.filepath = "data/" + number
# await self.account_interface.create_table()
setup_tmpdir() # shouldn't do anything if not running locally
def is_registered_locally(self) -> bool:
try:
return json.load(open(self.filepath))["registered"]
except (FileNotFoundError, json.JSONDecodeError, KeyError) as e:
logging.error(e)
return False
async def is_claimed(self) -> Optional[str]:
record = await self.account_interface.get_claim(self.number)
if not record:
logging.warning("checking claim without plus instead")
record = await self.account_interface.get_claim(self.number[1:])
if record:
return record[0].get("active_node_name")
raise Exception(f"no record in db for {self.number}")
return record[0].get("active_node_name")
async def download(self) -> None:
"""Fetch our account datastore from postgresql and mark it claimed"""
logging.info("datastore download entered")
await self.account_interface.free_accounts_not_updated_in_the_last_hour()
for i in range(5):
logging.info("checking claim")
claim = await self.is_claimed()
if not claim:
logging.info("no account claim!")
break
# you can also try to kill the other process
logging.info(
"this account is claimed by %s, waiting",
claim,
)
await asyncio.sleep(6)
if i == 4:
logging.info("time's up")
logging.info("downloading")
record = await self.account_interface.get_datastore(self.number)
if not record and utils.get_secret("MIGRATE"):
logging.warning("trying without plus")
record = await self.account_interface.get_datastore(
self.number.removeprefix("+")
)
logging.info("got datastore from pg")
if json_data := record[0].get("account"):
# legacy json-only field
loaded_data = json.loads(json_data)
if "username" in loaded_data:
try:
os.mkdir("data")
except FileExistsError:
pass
open("data/" + loaded_data["username"], "w").write(json_data)
return
buffer = BytesIO(record[0].get("datastore"))
tarball = TarFile(fileobj=buffer)
fnames = [member.name for member in tarball.getmembers()]
logging.debug(fnames[:2])
logging.info(
"expected file %s exists: %s",
self.filepath,
self.filepath in fnames,
)
tarball.extractall(utils.ROOT_DIR)
# open("last_downloaded_checksum", "w").write(zlib.crc32(buffer.seek(0).read()))
hostname = socket.gethostname()
await self.account_interface.mark_account_claimed(self.number, hostname)
logging.debug("marked account as claimed, asserting that this is the case")
assert await self.is_claimed()
return
def tarball_data(self) -> Optional[bytes]:
"""Tarball our data files"""
if not self.is_registered_locally():
logging.error("datastore not registered. not uploading")
return None
# fixme: check if the last thing we downloaded/uploaded
# is older than the last thing in the db
buffer = BytesIO()
tarball = TarFile(fileobj=buffer, mode="w")
try:
tarball.add(self.filepath)
try:
tarball.add(self.filepath + ".d")
except FileNotFoundError:
logging.info("ignoring no %s", self.filepath + ".d")
except FileNotFoundError:
logging.warning(
"couldn't find %s in %s, adding data instead",
self.filepath + ".d",
os.getcwd(),
)
tarball.add("data")
fnames = [member.name for member in tarball.getmembers()]
logging.debug(fnames[:2])
tarball.close()
buffer.seek(0)
data = buffer.read()
return data
async def upload(self) -> Any:
"""Puts account datastore in postgresql."""
data = self.tarball_data()
if not data:
return
kb = round(len(data) / 1024, 1)
# maybe something like:
# upload and return registered timestamp. write timestamp locally. when uploading, check that the last_updated_ts in postgres matches the file
# if it doesn't, you've probably diverged, but someone may have put an invalid ratchet more recently by mistake (e.g. restarting triggering upload despite crashing)
# or:
# open("last_uploaded_checksum", "w").write(zlib.crc32(buffer.seek(0).read()))
# you could formalize this as "present the previous checksum to upload" as a db procedure
await self.account_interface.upload(self.number, data)
logging.debug("saved %s kb of tarballed datastore to supabase", kb)
return
async def mark_freed(self) -> list:
"""Marks account as freed in PG database."""
return await self.account_interface.mark_account_freed(self.number)
def setup_tmpdir() -> None:
if not utils.LOCAL:
logging.warning("not setting up tmpdir, FLY_APP_NAME is set")
return
if utils.ROOT_DIR == ".":
logging.warning("not setting up tmpdir, using current directory")
return
if utils.ROOT_DIR == "/tmp/local-signal/" and not utils.MEMFS:
try:
shutil.rmtree(utils.ROOT_DIR)
except (FileNotFoundError, OSError) as e:
logging.warning("couldn't remove rootdir: %s", e)
if not utils.MEMFS:
(Path(utils.ROOT_DIR) / "data").mkdir(exist_ok=True, parents=True)
try:
os.symlink(Path("avatar.png").absolute(), utils.ROOT_DIR + "/avatar.png")
except FileExistsError:
pass
logging.info("chdir to %s", utils.ROOT_DIR)
os.chdir(utils.ROOT_DIR)
return
async def getFreeSignalDatastore() -> SignalDatastore:
interface = get_account_interface()
await interface.free_accounts_not_updated_in_the_last_hour()
record = await interface.get_free_account()
if not record:
raise Exception("no free accounts")
# alternatively, register an account...
# could put some of register.py/signalcaptcha handler here...
number = record[0].get("id")
logging.info(number)
assert number
return SignalDatastore(number)
# maybe a config about where we're running:
# MEMFS, DOWNLOAD, ROOT_DIR, etc
# is HCL overkill?
parser = argparse.ArgumentParser(
description="manage the signal datastore. use ENV=... to use something other than dev"
)
subparser = parser.add_subparsers(dest="subparser") # ?
# h/t https://gist.github.com/mivade/384c2c41c3a29c637cb6c603d4197f9f
def argument(*name_or_flags: Any, **kwargs: Any) -> tuple:
"""Convenience function to properly format arguments to pass to the
subcommand decorator.
"""
return (list(name_or_flags), kwargs)
def subcommand(
_args: Optional[list] = None, parent: argparse._SubParsersAction = subparser
) -> Callable:
"""Decorator to define a new subcommand in a sanity-preserving way.
The function will be stored in the ``func`` variable when the parser
parses arguments so that it can be called directly like so::
args = cli.parse_args()
args.func(args)
Usage example::
@subcommand([argument("-d", help="Enable debug mode", action="store_true")])
def subcommand(args):
print(args)
Then on the command line::
$ python cli.py subcommand -d
"""
def decorator(func: Callable) -> None:
_parser = parent.add_parser(func.__name__, description=func.__doc__)
for arg in _args if _args else []:
_parser.add_argument(*arg[0], **arg[1])
_parser.set_defaults(func=func)
return decorator
@subcommand()
async def list_accounts(_args: argparse.Namespace) -> None:
"list available accounts in table format"
cols = ["id", "last_update_ms", "last_claim_ms", "active_node_name"]
interface = get_account_interface()
# sorry
if "notes" in [
column.get("column_name")
for column in (
await interface.execute(
"select column_name from information_schema.columns where table_name='signal_accounts';"
)
or [] # don't error if the query fails
)
]:
cols.append("notes")
query = f"select {' ,'.join(cols)} from signal_accounts order by id"
accounts = await get_account_interface().execute(query)
if not isinstance(accounts, list):
return
table = [cols] + [
[str(value) for value in account.values()] for account in accounts
]
str_widths = [max(len(row[index]) for row in table) for index in range(len(cols))]
row_format = " ".join("{:<" + str(width) + "}" for width in str_widths)
for row in table:
print((row_format.format(*row).rstrip()))
return
@subcommand([argument("--number")])
async def free(ns: argparse.Namespace) -> None:
"mark account freed"
await get_account_interface().mark_account_freed(ns.number)
async def _set_note(number: str, note: str) -> None:
await get_account_interface().execute(
"update signal_accounts set notes=$1 where id=$2",
note,
number,
)
@subcommand([argument("--number"), argument("note", help="new note for number")])
async def set_note(ns: argparse.Namespace) -> None:
"set the note field for a number"
await _set_note(ns.number, ns.note)
@subcommand(
[argument("--path"), argument("--number"), argument("note", help="note for number")]
)
async def upload(ns: argparse.Namespace) -> None:
"""
upload a datastore
--path to a directory that contains data/
--number for the account number
note: note indicating if this number is free or used for a specific bot
"""
if ns.path:
os.chdir(ns.path)
if ns.number:
num = ns.number
else:
num = sorted(os.listdir("data"))[0]
store = SignalDatastore(num)
await store.upload()
await _set_note(num, ns.note)
@subcommand([argument("--number")])
async def sync(ns: argparse.Namespace) -> None:
# maybe worth running autosave after all?
try:
datastore = SignalDatastore(ns.number)
await datastore.download()
except (IndexError, DatastoreError):
datastore = await getFreeSignalDatastore()
await datastore.download()
try:
while 1:
time.sleep(3600)
except KeyboardInterrupt:
await datastore.upload()
# patcherex/techniques/simple_ptr_enc.py
import string
import random
import logging
from collections import defaultdict
import networkx
import pyvex
from angr.sim_variable import SimConstantVariable, SimRegisterVariable, SimMemoryVariable
from angr import KnowledgeBase
from ..backends import ReassemblerBackend
from ..errors import SimplePtrEncError
from ..technique import Technique
from ..patches import InsertCodePatch, PointerArrayPatch, AddEntryPointPatch, AddRWDataPatch
l = logging.getLogger('techniques.simple_ptr_enc')
# TODO: - detect if ebp is used as base pointer in a function or not
# TODO: - support more types of VEX statements and expressions
# TODO: - compress the pointer storage array
# TODO: - use random strings for label names ('begin', 'end', etc.)
# TODO: - more testing
# TODO: - bug fixes
# TODO: - do not re-encrypt for control-flow changing code, like jmps and calls
class MiniAST(object):
def __init__(self, op, args=None):
self.op = op
self.args = args
def __repr__(self):
s = "(%s %s)" % (self.op, " ".join(repr(a) for a in self.args))
return s
class DerefInstruction(object):
def __init__(self, ins_addr, action, addr_regs):
self.ins_addr = ins_addr
self.ins_size = None
self.action = action
self.addr_regs = addr_regs
self.addr_reg_overwritten = None
self.skip = False
self.decryption_addrs = None
self.encryption_addrs = None
def __repr__(self):
return "<Deref %#08x %s@%s>" % (self.ins_addr, self.action, self.addr_regs)
@property
def should_reencrypt(self):
if self.encryption_addrs is None:
return True
return len(self.encryption_addrs) > 0
class RefInstruction(object):
def __init__(self, ins_addr, addr_reg, sources, store_addr=None):
self.ins_addr = ins_addr
self.ins_size = None
self.addr_reg = addr_reg
self.sources = sources
self.store_addr = store_addr
def __repr__(self):
if self.addr_reg is not None:
return "<Ref %#08x %s: %s>" % (self.ins_addr, self.addr_reg, self.sources)
else:
return "<Ref %#08x %s: %s>" % (self.ins_addr, self.store_addr, self.sources)
class BlockTraverser(object):
OPSTR_TO_OP = {
'Iop_Add32': '+',
'Iop_Sub32': '-',
'Iop_Sub8': '-',
'Iop_Shl32': '<<',
'Iop_And8': '&',
'Iop_And32': '&',
'Iop_Sar32': '>>',
'Iop_Shr32': '>>',
'Iop_Mul32': '*',
}
def __init__(self, cfg):
self.cfg = cfg
self._addr_belongs_to_section = self.cfg._addr_belongs_to_section
# global temporary variables
self._last_instr = None
self.ins_addr = None
self.instrs = [ ]
self.ip_offset = self.cfg.project.arch.ip_offset
self.sp_offset = self.cfg.project.arch.sp_offset
self.bp_offset = self.cfg.project.arch.bp_offset
self.tmps = {}
self.regs = {}
# begin traversal!
self._analyze()
self._post_analysis()
def _is_addr_valid(self, addr):
if self._addr_belongs_to_section(addr) is not None:
return True
if 0x4347c000 <= addr < 0x4347c000 + 0x1000:
return True
return False
def _ast_to_addr_regs(self, ast):
"""
Pick registers that holds a valid address and return them.
:param MiniAST ast: The AST
:return: A list of register offsets.
:rtype: list
"""
# we only care about where the base comes from
# - if there is only one register, then it must be the base
# - if there are a register and a const, the register is the base if and only if the const
# is not a valid address. Otherwise the constant is the base
# - if there are two constants, it's gonna be a little complicated... TODO
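# e.g. (+ (reg edi) (const 4)) yields [edi] when 4 is not a mapped address;
# if the constant does fall inside a mapped section, it is treated as the
# base address instead and [] is returned.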
is_addr_valid = self._is_addr_valid
if len(ast.args) == 2:
# binary operations
if ast.op == '+':
if ast.args[1].op == 'const':
# something + constant
if is_addr_valid(ast.args[1].args[0]):
# base address + some offset
return []
else:
if ast.args[0].op == 'reg':
# this register must be the base address
return [ast.args[0].args[0]]
elif ast.args[0].op == 'const' and is_addr_valid(ast.args[0].args[0]):
# the constant is the base address
return []
else:
return self._ast_to_addr_regs(ast.args[0])
elif ast.args[1].op in ('<<',):
# arg1 must be used as an offset or index
# arg0 is the base address
if ast.args[0].op == 'reg':
return [ast.args[0].args[0]]
elif ast.args[0].op == 'const':
return []
elif ast.args[0].op == 'reg':
# let's see if we can extract a base address from other arguments
regs = self._ast_to_addr_regs(ast.args[1])
if not regs:
# nice! the first argument must be the base register
return [ast.args[0].args[0]]
elif ast.op == '-':
if ast.args[0].op == 'reg':
return [ast.args[0].args[0]]
elif ast.args[0].op == 'const':
return []
elif len(ast.args) == 1:
if ast.op == 'reg':
# directly using the register as the address
return ast.args
elif ast.op == 'const':
# using a constant as the address
return []
print "Unresolved AST", ast
import ipdb; ipdb.set_trace()
def _ast_to_indir_memrefs(self, ast):
"""
:param ast:
:return:
"""
if len(ast.args) == 2:
if ast.args[0].op == 'reg' and ast.args[1].op == 'const':
# reg +/- const
# the original instruction might be 'push <addr>' when the const is 4
reg_offset = ast.args[0].args[0]
reg_name = self.cfg.project.arch.register_names[reg_offset]
const = ast.args[1].args[0]
op = ast.op
return "dword ptr [{reg_name} {op} {delta}]".format(
reg_name=reg_name,
op=op,
delta=const,
)
else:
import ipdb; ipdb.set_trace()
elif len(ast.args) == 1:
if ast.op == 'const':
return 'dword ptr [%#x]' % ast.args[0]
elif ast.op == 'reg':
reg_offset = ast.args[0]
return 'dword ptr [%s]' % (self.cfg.project.arch.register_names[reg_offset])
else:
print "Unresolved AST", ast
import ipdb; ipdb.set_trace()
def _filter_instrs(self):
raise NotImplementedError()
def _post_analysis(self):
raise NotImplementedError()
def _analyze(self):
for function in self.cfg.functions.values(): # type: angr.knowledge.Function
for block in function.blocks:
self.last_instr = None # type: DerefInstruction
vex_block_noopt = self.cfg.project.factory.block(block.addr, opt_level=0, size=block.size).vex
self.ins_addr = None
self.tmps = {}
self.regs = {}
for stmt in vex_block_noopt.statements:
handler = getattr(self, '_handle_statement_%s' % (stmt.__class__.__name__), None)
if handler is not None:
handler(stmt)
if self.last_instr is not None and self.last_instr.ins_size is None:
self.last_instr.ins_size = block.addr + vex_block_noopt.size - self.last_instr.ins_addr
self.last_instr = None
self._handle_next(vex_block_noopt.next)
if self.last_instr is not None and self.last_instr.ins_size is None:
self.last_instr.ins_size = block.addr + vex_block_noopt.size - self.last_instr.ins_addr
self.last_instr = None
self._filter_instrs()
def _handle_next(self, next_expr):
pass
def _handle_statement_IMark(self, stmt):
self.ins_addr = stmt.addr + stmt.delta
# update the instruction size of the previous DerefInstruction object
if self.last_instr is not None and self.last_instr.ins_size is None:
self.last_instr.ins_size = self.ins_addr - self.last_instr.ins_addr
self.last_instr = None
def _handle_statement_WrTmp(self, stmt):
tmp = stmt.tmp
data = stmt.data
data = self._handle_expression(data)
if data is not None:
self.tmps[tmp] = data
def _handle_statement_Put(self, stmt):
# loading data into a register
if self.last_instr is not None and self.last_instr.addr_reg_overwritten is None and \
self.last_instr.ins_addr == self.ins_addr and \
len(self.last_instr.addr_regs) == 1 and \
stmt.offset in self.last_instr.addr_regs:
# the address register is overwritten in the same instruction
self.last_instr.addr_reg_overwritten = True
data = self._handle_expression(stmt.data)
if data is not None:
self.regs[stmt.offset] = data
return data
def _handle_expression(self, expr, allow_override=True):
if allow_override:
expr_handler = getattr(self.__class__, '_handle_expression_%s' % (expr.__class__.__name__), None)
else:
expr_handler = getattr(BlockTraverser, '_handle_expression_%s' % (expr.__class__.__name__), None)
if expr_handler is not None:
return expr_handler(self, expr)
else:
return None
def _handle_expression_Get(self, expr):
# read from register
if expr.offset not in (self.ip_offset, self.bp_offset, self.sp_offset):
return MiniAST('reg', [ expr.offset ])
def _handle_expression_Binop(self, expr):
# some sort of arithmetic operations
if expr.op.startswith('Iop_Cmp') or \
expr.op.startswith('Iop_Div') or \
expr.op.startswith('Iop_Or') or \
expr.op.startswith('Iop_Xor') or \
expr.op in ('Iop_32HLto64',):
# ignore them.
return None
elif expr.op in self.OPSTR_TO_OP:
op = self.OPSTR_TO_OP[expr.op]
else:
op = expr.op
args = []
for arg in expr.args:
arg_data = self._handle_expression(arg, False)
if arg_data is None:
return None
args.append(arg_data)
return MiniAST(op, args)
def _handle_expression_RdTmp(self, expr):
data_tmp = expr.tmp
if data_tmp in self.tmps:
return self.tmps[data_tmp]
def _handle_expression_Const(self, expr):
value = expr.con.value
return MiniAST('const', [ value ])
class MemoryRefCollector(BlockTraverser):
def __init__(self, cfg):
super(MemoryRefCollector, self).__init__(cfg)
def _has_regs(self, ast, reg_offsets):
if not isinstance(ast, MiniAST):
return False
if ast.op == 'reg' and ast.args[0] in reg_offsets:
return True
for arg in ast.args:
if self._has_regs(arg, reg_offsets):
return True
return False
def _filter_instrs(self):
# filtering
self.instrs = [i for i in self.instrs if i.addr_reg not in (self.ip_offset, self.sp_offset, self.bp_offset)
and i.addr_reg < 40 # this is x86 only - 40 is cc_op
]
def _post_analysis(self):
# do nothing
pass
def _handle_statement_Put(self, stmt):
data = self._handle_expression(stmt.data)
if data is not None and stmt.offset != self.ip_offset:
# check whether data is a memory reference or not
if data.op == 'const' or \
(self._has_regs(data, (self.sp_offset, self.bp_offset)) and
(stmt.offset not in (self.sp_offset, self.bp_offset))
):
self.last_instr = RefInstruction(self.ins_addr, stmt.offset, data)
self.instrs.append(self.last_instr)
# special case handling: writing data to esp
if data is not None and stmt.offset is self.sp_offset:
if isinstance(stmt.data, pyvex.IRExpr.RdTmp):
tmp = stmt.data.tmp
self.tmps[tmp] = MiniAST('reg', [ stmt.offset ])
def _handle_statement_Store(self, stmt):
data = self._handle_expression(stmt.data)
addr = self._handle_expression(stmt.addr)
if data is not None and addr is not None:
# check whether data is a memory reference or not
if data.op == 'const': # and not self._has_regs(addr, (self.sp_offset, self.bp_offset)):
self.last_instr = RefInstruction(self.ins_addr, None, data, store_addr=self._ast_to_indir_memrefs(addr))
self.instrs.append(self.last_instr)
def _handle_expression_Get(self, expr):
# read from register
if expr.offset != self.ip_offset:
return MiniAST('reg', [ expr.offset ])
def _handle_expression_Const(self, expr):
value = expr.con.value
# is it using an effective address?
if self._is_addr_valid(value):
# it is...
getHost(self, name):
"""Get information about a Host.
:param name: The name of the Host to find
:type name: str
:returns: host dict
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_HOST - HOST doesn't exist
"""
response, body = self.http.get('/hosts/%s' % name)
return body
def createHost(self, name, iscsiNames=None, FCWwns=None, optional=None):
"""Create a new Host entry.
:param name: The name of the host
:type name: str
:param iscsiNames: Array of iSCSI iqns
:type iscsiNames: array
:param FCWwns: Array of Fibre Channel World Wide Names
:type FCWwns: array
:param optional: The optional stuff
:type optional: dict
.. code-block:: python
optional = {
'persona': 1, # ID of the persona to assign
# to the host.
# 3.1.3 default: Generic-ALUA
# 3.1.2 default: General
'domain': 'myDomain', # Create the host in the
# specified domain, or default
# domain if unspecified.
'forceTearDown': False, # If True, force to tear down
# low-priority VLUN exports.
'descriptors':
{'location': 'earth', # The host's location
'IPAddr': '10.10.10.10', # The host's IP address
'os': 'linux', # The operating system running
# on the host.
'model': 'ex', # The host's model
'contact': 'Smith', # The host's owner and contact
'comment': "Joe's box"} # Additional host information
}
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- PERM_DENIED - Permission denied
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_MISSING_REQUIRED - Name not specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_PARAM_CONFLICT - FCWWNs and iSCSINames are both
specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_EXCEEDS_LENGTH - Host name, domain name, or iSCSI name
is too long.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_EMPTY_STR - Input string (for domain name, iSCSI name,
etc.) is empty.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ILLEGAL_CHAR - Any error from host-name or domain-name
parsing.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI
names are specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_WRONG_TYPE - The length of WWN is not 16. WWN
specification contains non-hexadecimal digit.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXISTENT_PATH - host WWN/iSCSI name already used by another host
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXISTENT_HOST - host name is already used.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- NO_SPACE - No space to create host.
"""
info = {'name': name}
if iscsiNames:
iscsi = {'iSCSINames': iscsiNames}
info = self._mergeDict(info, iscsi)
if FCWwns:
fc = {'FCWWNs': FCWwns}
info = self._mergeDict(info, fc)
if optional:
info = self._mergeDict(info, optional)
response, body = self.http.post('/hosts', body=info)
return body
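# Usage sketch (hypothetical values; 'cl' is an authenticated client instance):
#
#   optional = {'domain': 'myDomain',
#               'descriptors': {'os': 'linux', 'IPAddr': '10.10.10.10'}}
#   cl.createHost('host01',
#                 iscsiNames=['iqn.1993-08.org.debian:01:abcdef'],
#                 optional=optional)
#
# Note that iscsiNames and FCWwns are mutually exclusive; supplying both
# results in HTTPBadRequest (INV_INPUT_PARAM_CONFLICT).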
def modifyHost(self, name, mod_request):
"""Modify an existing Host entry.
:param name: The name of the host
:type name: str
:param mod_request: Objects for Host Modification Request
:type mod_request: dict
.. code-block:: python
mod_request = {
'newName': 'myNewName', # New name of the host
'pathOperation': 1, # If adding, adds the WWN or
# iSCSI name to the existing
# host.
'FCWWNs': [], # One or more WWN to set for
# the host.
'iSCSINames': [], # One or more iSCSI names to
# set for the host.
'forcePathRemoval': False, # If True, remove SSN(s) or
# iSCSI(s) even if there are
# VLUNs exported to host
'persona': 1, # ID of the persona to modify
# the host's persona to.
'descriptors':
{'location': 'earth', # The host's location
'IPAddr': '10.10.10.10', # The host's IP address
'os': 'linux', # The operating system running
# on the host.
'model': 'ex', # The host's model
'contact': 'Smith', # The host's owner and contact
'comment': '<NAME>'} # Additional host information
'chapOperation': HOST_EDIT_ADD, # Add or remove
'chapOperationMode': CHAP_INITIATOR, # Initiator or target
'chapName': 'MyChapName', # The chap name
'chapSecret': 'xyz', # The chap secret for the host
# or the target
'chapSecretHex': False, # If True, the chapSecret is
# treated as Hex.
'chapRemoveTargetOnly': True # If True, then remove target
# chap only
}
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT - Missing host name.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_PARAM_CONFLICT - Both iSCSINames & FCWWNs are
specified. (lot of other possibilities)
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ONE_REQUIRED - iSCSINames or FCWwns missing.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ONE_REQUIRED - No path operation specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_BAD_ENUM_VALUE - Invalid enum value.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_MISSING_REQUIRED - Required fields missing.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_EXCEEDS_LENGTH - Host descriptor argument length, new
host name, or iSCSI name is too long.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ILLEGAL_CHAR - Error parsing host or iSCSI name.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXISTENT_HOST - New host name is already used.
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_HOST - Host to be modified does not exist.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_TOO_MANY_WWN_OR_iSCSI - More than 1024 WWNs or iSCSI
names are specified.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_WRONG_TYPE - Input value is of the wrong type.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXISTENT_PATH - WWN or iSCSI name is already claimed by other
host.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_BAD_LENGTH - CHAP hex secret length is not 16 bytes, or
chap ASCII secret length is not 12 to 16 characters.
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NO_INITIATOR_CHAP - Setting target CHAP without initiator CHAP.
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_CHAP - Remove non-existing CHAP.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- NON_UNIQUE_CHAP_SECRET - CHAP secret is not unique.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- EXPORTED_VLUN - Setting persona with active export; remove a host
path on an active export.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- NON_EXISTENT_PATH - Remove a non-existing path.
:raises: :class:`~hpe3parclient.exceptions.HTTPConflict`
- LUN_HOSTPERSONA_CONFLICT - LUN number and persona capability
conflict.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_DUP_PATH - Duplicate path specified.
"""
response = self.http.put('/hosts/%s' % name, body=mod_request)
return response
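# Usage sketch (hypothetical values; 'cl' is an authenticated client instance).
# Adding an FC path to an existing host combines pathOperation with FCWWNs:
#
#   mod_request = {'pathOperation': 1,  # 1 == add, per the docstring above
#                  'FCWWNs': ['0123456789ABCDEF']}
#   cl.modifyHost('host01', mod_request)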
def deleteHost(self, name):
"""Delete a Host.
:param name: Host Name
:type name: str
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_HOST - HOST Not Found
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- IN_USE - The HOST Cannot be removed because it's in use.
:raises: :class:`~hpe3parclient.exceptions.HTTPForbidden`
- PERM_DENIED - Permission denied
"""
response, body = self.http.delete('/hosts/%s' % name)
def findHost(self, iqn=None, wwn=None):
"""Find a host from an iSCSI initiator or FC WWN.
:param iqn: lookup based on iSCSI initiator
:type iqn: str
:param wwn: lookup based on WWN
:type wwn: str
"""
# for now there is no search in the REST API
# so we can do a create looking for a specific
# error. If we don't get that error, we nuke the
# fake host.
def _hostname():
# create a safe, random hostname that won't
# create a collision when findHost is called
# in parallel, before the temp host is removed.
uuid_str = str(uuid.uuid4()).replace("-", "")[:20]
return uuid_str
cmd = ['createhost']
# create a random hostname
hostname = _hostname()
if iqn:
cmd.append('-iscsi')
cmd.append(hostname)
if iqn:
cmd.append(iqn)
else:
cmd.append(wwn)
result = self._run(cmd)
test = ' '.join(result)
search_str = "already used by host "
if search_str in test:
# host exists, return name used by 3par
hostname_3par = self._get_next_word(test, search_str)
return hostname_3par
else:
# host creation worked...so we need to remove it.
# this means we didn't find an existing host that
# is using the iqn or wwn.
self.deleteHost(hostname)
return None
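# Usage sketch (hypothetical IQN): check whether any host already claims an
# initiator before creating a new one.
#
#   existing = cl.findHost(iqn='iqn.1993-08.org.debian:01:abcdef')
#   if existing is None:
#       cl.createHost('host01', iscsiNames=['iqn.1993-08.org.debian:01:abcdef'])
#
# Because this works by attempting a throw-away 'createhost' and parsing the
# resulting error text, it requires the CLI session behind self._run().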
def queryHost(self, iqns=None, wwns=None):
"""Find a host from an iSCSI initiator or FC WWN.
:param iqns: lookup based on iSCSI initiator list
:type iqns: list
:param wwns: lookup based on WWN list
:type wwns: list
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT - Invalid URI syntax.
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_HOST - HOST Not Found
:raises: :class:`~hpe3parclient.exceptions.HTTPInternalServerError`
- INTERNAL_SERVER_ERR - Internal server error.
:raises: :class:`~hpe3parclient.exceptions.HTTPBadRequest`
- INV_INPUT_ILLEGAL_CHAR - Host name contains invalid character.
"""
wwnsQuery = ''
if wwns:
tmpQuery = []
for wwn in wwns:
tmpQuery.append('wwn==%s' % wwn)
wwnsQuery = ('FCPaths[%s]' % ' OR '.join(tmpQuery))
iqnsQuery = ''
if iqns:
tmpQuery = []
for iqn in iqns:
tmpQuery.append('name==%s' % iqn)
iqnsQuery = ('iSCSIPaths[%s]' % ' OR '.join(tmpQuery))
query = ''
if wwnsQuery and iqnsQuery:
query = ('%(wwns)s OR %(iqns)s' % ({'wwns': wwnsQuery,
'iqns': iqnsQuery}))
elif wwnsQuery:
query = wwnsQuery
elif iqnsQuery:
query = iqnsQuery
query = '"%s"' % query
response, body = self.http.get('/hosts?query=%s' %
quote(query.encode("utf8")))
return body
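# Sketch of the query string this method builds: for
# wwns=['AABBCCDDEEFF0011', '1122334455667788'] the GET request is roughly
#
#   /hosts?query="FCPaths[wwn==AABBCCDDEEFF0011 OR wwn==1122334455667788]"
#
# (URL-encoded by quote() before being sent).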
def getHostVLUNs(self, hostName):
"""Get all of the VLUNs on a specific Host.
:param hostName: Host name
:type hostName: str
:raises: :class:`~hpe3parclient.exceptions.HTTPNotFound`
- NON_EXISTENT_HOST - HOST Not Found
"""
# calling getHost to see if the host exists and raise not found
# exception if it's not found.
self.getHost(hostName)
vluns = []
<filename>src/copulabayesnet/cop_plot.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on 5-12-2019
@author: <NAME>
"""
import matplotlib.pyplot as plt
import matplotlib.animation as manimation
#from biokit.viz.scatter import ScatterHist # biokit is not working right now
#from biokit.viz import corrplot
import numpy as np
from pycopula.visualization import pdf_2d, cdf_2d
#from tools.bncopula import Copula2d
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D  # required for the Axes3D(fig) call below
import datetime as dt
metadata = dict(title='Copula', artist='<NAME>',
comment='Enjoy!')
import matplotlib
def cop_2_param(Copula2d,
type_copula = "pdf",
plot_method = "3d",
zclip = 6,
elev = 15.,
azim = 280,
x_name = None,
y_name = None,
title = None,
save_fig=False,
save_path='pred_target.png'):
"""Plots a 2-parameter copula, either on a 2d or 3d plane, either the pdf or the cdf.
Parameters
----------
Copula2d : bncopula.Copula2d Object
A copula object
type_copula : str (optional)
Default value: "pdf"
The capacity of the copula: either 'pdf' or 'cdf'
plot_method : str (optional)
Default value: 3d.
Plot either 2d or 3d.
zclip : float (optional)
Default value: 6.
The maximum z-value to plot
elev : float (optional)
Default value: 15.
From which elevation to watch the 3d plot
azim : float (optional)
Default value: 280.
The angle to watch the 3d plot.
x_name : str (optional)
The name of the x-axis. When None, tries to find name in the copula.
y_name : str (optional)
The name of the y-axis. When None, tries to find name in the copula.
title : str (optional)
Title of the figure.
save_fig : bool (optional)
Whether or not to save the figure.
save_path : str (optional)
Path where to save the figure to
"""
copula = Copula2d.copula
if type_copula.lower() == "pdf":
level = pdf_2d(copula, zclip = zclip)
elif type_copula.lower() == "cdf":
level = cdf_2d(copula)
else:
raise ValueError("type_copula should be either 'pdf' or 'cdf'")
x,y = np.meshgrid(level[0], level[1])
# Handle data names
if x_name is None:
try:
x_name = Copula2d.x_name
except:
x_name = "X values"
if y_name is None:
try:
y_name = Copula2d.y_name
except:
y_name = "Y values"
fig = plt.figure(figsize = (10,8))
if plot_method.lower() == "3d":
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elev=elev, azim=azim)
ax.plot_surface(x,y,level[2],cmap=cm.coolwarm)
if type_copula.lower() == "pdf":
ax.set_zlabel("Probability ($p$)")
elif type_copula.lower() == "cdf":
ax.set_zlabel("Cumulative probability")
elif plot_method.lower() == "2d":
ax = fig.add_subplot(111)
maxval = round(np.min([np.nanmax(np.array(level[2])), zclip]))
cm_levels = np.linspace(0,maxval, int(np.max([20, maxval*5])+1))
cmp = ax.contourf(x,y,level[2],cmap=cm.coolwarm, levels = cm_levels)
ax.contour(x,y,level[2], colors='k',linewidths=0.5, levels = cm_levels)
cbar = plt.colorbar(cmp)
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.axis('scaled')
if type_copula.lower() == "pdf":
cbar.set_label("Probability (p)", rotation=270, labelpad = 20)
elif type_copula.lower() == "cdf":
cbar.set_label("Cumulative probability", rotation=270, labelpad = 20)
else:
raise ValueError("Plot method should be either '2d' or '3d'")
if title is None:
ax.set_title(f"Figure of a copula ({type_copula})")
else:
ax.set_title(title)
ax.set_xlabel(x_name)
ax.set_ylabel(y_name)
if save_fig:
plt.savefig(save_path, dpi=200, bbox_inches='tight')
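# Usage sketch (assumes a fitted bncopula.Copula2d object named 'cop'):
#
#   cop_2_param(cop, type_copula='pdf', plot_method='2d', zclip=4,
#               title='Copula density')
#   plt.show()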
def different_cop(x, y, level,
type_copula = "pdf",
plot_method = "3d",
zclip = 6,
elev = 15.,
azim = 280,
x_name = "$u_1$",
y_name = "$u_2$",
title = None,
save_fig=False,
save_path='pred_target.png'):
"""Plots any 2-parameter copula, either on a 2d or 3d plane, either the pdf or the cdf.
Parameters
----------
x, y : np.array
Meshgrid coordinates covering the unit square.
level : np.array
Copula pdf/cdf values evaluated on that grid.
type_copula : str (optional)
Default value: "pdf"
The capacity of the copula: either 'pdf' or 'cdf'
plot_method : str (optional)
Default value: 3d.
Plot either 2d or 3d.
zclip : float (optional)
Default value: 6.
The maximum z-value to plot
elev : float (optional)
Default value: 15.
From which elevation to watch the 3d plot
azim : float (optional)
Default value: 280.
The angle to watch the 3d plot.
x_name : str (optional)
The name of the x-axis. When None, tries to find name in the copula.
y_name : str (optional)
The name of the y-axis. When None, tries to find name in the copula.
title : str (optional)
Title of the figure.
save_fig : bool (optional)
Whether or not to save the figure.
save_path : str (optional)
Path where to save the figure to
"""
fig = plt.figure(figsize = (7,5.6)) # 10,8
if plot_method.lower() == "3d":
#ax = fig.add_subplot(111, projection='3d')
ax = Axes3D(fig)
ax.view_init(elev=elev, azim=azim)
level[level > zclip] = zclip
ax.plot_surface(x,y,level,cmap=cm.coolwarm)
plt.tight_layout()
#ax.set_zticklabels((), color = '#4d4d4d', fontsize = 14)
if type_copula.lower() == "pdf":
ax.set_zlim(0,zclip)
ax.set_zlabel("Probability (p)", color = '#4d4d4d', fontsize = 14)
elif type_copula.lower() == "cdf":
ax.set_zlabel("Cumulative probability", color = '#4d4d4d', fontsize = 14)
ax.w_zaxis.set_pane_color((250/255, 250/255, 250/255))
ax.w_yaxis.set_pane_color((225/255, 225/255, 225/255))
ax.w_xaxis.set_pane_color((190/255, 190/255, 190/255))
ax.xaxis._axinfo["grid"]['linestyle'] = ':'
ax.yaxis._axinfo["grid"]['linestyle'] = ':'
ax.zaxis._axinfo["grid"]['linestyle'] = ':'
#ax.grid(linestyle = ':')
#fig.set_tight_layout(True)
elif plot_method.lower() == "2d":
ax = fig.add_subplot(111)
maxval = round(np.min([np.nanmax(np.array(level)), zclip]))
cm_levels = np.linspace(0,maxval, int(np.max([20, maxval*5])+1))
cmp = ax.contourf(x,y,level,cmap=cm.coolwarm, levels = cm_levels)
ax.contour(x,y,level, colors='k',linewidths=0.5, levels = cm_levels)
cbar = plt.colorbar(cmp)
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.axis('scaled')
if type_copula.lower() == "pdf":
cbar.set_label("Probability (p)", rotation=270, labelpad = 20,
color = '#4d4d4d', fontsize = 14)
elif type_copula.lower() == "cdf":
cbar.set_label("Cumulative probability", rotation=270, labelpad = 20,
color = '#4d4d4d', fontsize = 14)
else:
raise ValueError("Plot method should be either '2d' or '3d'")
if title is None:
ax.set_title(f"Figure of a copula ({type_copula})",
fontsize = 16)
else:
ax.set_title(title, fontsize = 14)
ax.set_xlabel(x_name, color = '#4d4d4d', fontsize = 14)
ax.set_ylabel(y_name, color = '#4d4d4d', fontsize = 14)
#ax.set_xticklabels((), color = '#4d4d4d', fontsize = 14)
ax.tick_params(axis='both', which='major',color = '#4d4d4d')
if save_fig:
print(save_path)
plt.savefig(save_path, dpi=200)
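# Usage sketch: plot the density of a Farlie-Gumbel-Morgenstern copula with
# theta = 0.5, c(u1, u2) = 1 + theta*(1 - 2*u1)*(1 - 2*u2), on a 50x50 grid.
#
#   u = np.linspace(0.01, 0.99, 50)
#   x, y = np.meshgrid(u, u)
#   level = 1 + 0.5 * (1 - 2 * x) * (1 - 2 * y)
#   different_cop(x, y, level, type_copula='pdf', plot_method='2d')
#   plt.show()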
def different_cop_video(x, y, level,
type_copula = "pdf",
plot_method = "3d",
zclip = 6,
elev = 15.,
azim = 280,
x_name = "$u_1$",
y_name = "$u_2$",
title = None,
save_video = False,
save_fig=False,
save_path='pred_target.png'):
"""Plots any 2-parameter copula, either on a 2d or 3d plane,
either the pdf or the cdf.
Parameters
----------
x, y : np.array
Meshgrid coordinates covering the unit square.
level : np.array
Copula pdf/cdf values evaluated on that grid.
type_copula : str (optional)
Default value: "pdf"
The capacity of the copula: either 'pdf' or 'cdf'
plot_method : str (optional)
Default value: 3d.
Plot either 2d or 3d.
zclip : float (optional)
Default value: 6.
The maximum z-value to plot
elev : float (optional)
Default value: 15.
From which elevation to watch the 3d plot
azim : float (optional)
Default value: 280.
The angle to watch the 3d plot.
x_name : str (optional)
The name of the x-axis. When None, tries to find name in the copula.
y_name : str (optional)
The name of the y-axis. When None, tries to find name in the copula.
title : str (optional)
Title of the figure.
save_fig : bool (optional)
Whether or not to save the figure.
save_path : str (optional)
Path where to save the figure to
"""
fig = plt.figure(figsize = (7,5.6)) # 10,8
if save_video:
path2 = "../figures/part6_2.mp4"
matplotlib.use("Agg")
FFMpegWriter = manimation.writers['ffmpeg']
writer = FFMpegWriter(fps=10, metadata=metadata, bitrate = 2000)
with writer.saving(fig, path2, 200):
if plot_method.lower() == "3d":
ax = fig.add_subplot(111, projection='3d')
if title is None:
ax.set_title(f"Figure of a copula ({type_copula})",
fontsize = 16)
else:
ax.set_title(title, fontsize = 14)
ax.set_xlabel(x_name, color = '#4d4d4d', fontsize = 14)
ax.set_ylabel(y_name, color = '#4d4d4d', fontsize = 14)
#ax.set_xticklabels((), color = '#4d4d4d', fontsize = 14)
ax.tick_params(axis='both', which='major',color = '#4d4d4d')
level[level > zclip] = zclip
ax.plot_surface(x,y,level,cmap=cm.coolwarm)
#plt.tight_layout()
#ax.set_zticklabels((), color = '#4d4d4d', fontsize = 14)
if type_copula.lower() == "pdf":
ax.set_zlim(0,zclip)
ax.set_zlabel("Probability (p)", color = '#4d4d4d', fontsize = 14)
elif type_copula.lower() == "cdf":
ax.set_zlabel("Cumulative probability", color = '#4d4d4d', fontsize = 14)
ax.w_zaxis.set_pane_color((250/255, 250/255, 250/255))
ax.w_yaxis.set_pane_color((225/255, 225/255, 225/255))
ax.w_xaxis.set_pane_color((190/255, 190/255, 190/255))
ax.xaxis._axinfo["grid"]['linestyle'] = ':'
ax.yaxis._axinfo["grid"]['linestyle'] = ':'
ax.zaxis._axinfo["grid"]['linestyle'] = ':'
for i in np.linspace(0,360,100):
ax.view_init(elev=25+i/8, azim=i)
writer.grab_frame()
#ax.grid(linestyle = ':')
#fig.set_tight_layout(True)
elif plot_method.lower() == "2d":
ax = fig.add_subplot(111)
maxval = round(np.min([np.nanmax(np.array(level)), zclip]))
cm_levels = np.linspace(0,maxval, int(np.max([20, maxval*5])+1))
cmp = ax.contourf(x,y,level,cmap=cm.coolwarm, levels = cm_levels)
ax.contour(x,y,level, colors='k',linewidths=0.5, levels = cm_levels)
cbar = plt.colorbar(cmp)
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.axis('scaled')
if type_copula.lower() == "pdf":
cbar.set_label("Probability (p)", rotation=270, labelpad = 20,
color = '#4d4d4d', fontsize = 14)
elif type_copula.lower() == "cdf":
cbar.set_label("Cumulative probability", rotation=270, labelpad = | |
random_type == 'micro_multiply_by':
rule['how_many'] = random.choice([2, 3, 4, 5, 'size'])
elif random_type == 'macro_multiply_by':
rule['how_many'] = random.choice(['both', 'hor', 'ver'])
rule['rotates'] = [np.random.randint(1) for _ in range(4)]
rule['flips'] = [random.choice(['hor', 'ver', 'horver', 'no']) for _ in range(4)]
elif random_type == 'distribute_from_border':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
elif random_type == 'draw_lines':
rule['direction'] = random.choice(['everywhere', 'horizontal', 'vertical', 'horver', 'diagonal'])
# 'top', 'bottom', 'left', 'right',
# 'top_left', 'bottom_left', 'top_right', 'bottom_right'])
rule['not_stop_by_color'] = 0 # get_random_all_color()
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'reduce':
rule['skip_color'] = get_random_all_color()
elif random_type == 'draw_line_to':
#rule['direction_type'] = random.choice(['border'])
rule['direction_color'] = get_random_all_color()
rule['not_stop_by_color'] = 0
if np.random.rand() < 0.5:
rule['not_stop_by_color_and_skip'] = get_random_all_color()
else:
rule['not_stop_by_color_and_skip'] = 0
rule['start_by_color'] = get_random_all_color()
rule['with_color'] = get_random_out_color()
elif random_type == 'distribute_colors':
rule['colors'] = list(np.unique([get_random_out_color(), get_random_all_color()]))
rule['horizontally'] = np.random.randint(2)
rule['vertically'] = np.random.randint(2)
rule['intersect'] = get_random_out_color()
elif random_type == 'color_for_inners':
rule['color_out'] = get_random_out_color()
elif random_type == 'crop_figure':
rule['mode'] = random.choice(['smallest', 'biggest'])
rule['dif_c_edge'] = random.choice([True, False])
elif random_type == 'unity':
rule['mode'] = random.choice(['diagonal', 'horizontal', 'vertical', 'horver'])
# rule['inner'] = np.random.choice(2)
rule['ignore_colors'] = [0]
if np.random.rand() < 0.5:
rule['ignore_colors'] += [get_random_all_color()]
rule['with_color'] = random.choice([get_random_out_color(), 0])
elif random_type == 'map_color':
rule['color_in'] = get_random_all_color()
rule['color_out'] = get_random_out_color()
elif random_type == 'gravity':
rule['gravity_type'] = random.choice(['figures', 'cells'])
rule['steps_limit'] = np.random.choice(2)
rule['look_at_what_to_move'] = np.random.choice(2)
if rule['look_at_what_to_move'] == 1:
rule['color_what'] = get_random_out_color()
rule['direction_type'] = random.choice(['border', 'color'])
if rule['direction_type'] == 'border':
rule['direction_border'] = random.choice(['top', 'bottom', 'left', 'right'])
else:
rule['direction_color'] = get_random_color()
elif random_type == 'split_by_H' or random_type == 'split_by_W':
rule['merge_rule'] = random.choice(['and', 'equal', 'or', 'xor'])
elif random_type == 'align_pattern':
rule['macro_type'] = 'global_interaction_rule'
# rule['allow_rotation'] = False
rule['allow_color'] = get_random_all_color()
rule['fill_with_color'] = 0 #random.choice([0, get_random_all_color()])
return rule
def get_task_metadata(task):
colors = []
shapes_input = [[], []]
shapes_output = [[], []]
for part in ['train']:
for uni_task in task[part]:
inp = uni_task['input']
colors += list(np.unique(inp))
out = uni_task['output']
colors += list(np.unique(out))
shapes_input[0].append(inp.shape[0])
shapes_input[1].append(inp.shape[1])
shapes_output[0].append(out.shape[0])
shapes_output[1].append(out.shape[1])
all_colors = np.unique(colors)
min_k1 = int(np.floor(np.min(np.array(shapes_output[0])/np.array(shapes_input[0]))))
min_k2 = int(np.floor(np.min(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_k1 = int(np.ceil(np.max(np.array(shapes_output[0])/np.array(shapes_input[0]))))
max_k2 = int(np.ceil(np.max(np.array(shapes_output[1])/np.array(shapes_input[1]))))
max_shape = np.max([shapes_input])
config = {}
config['mink1'] = max(1, min(min(min_k1, 30//max_shape), 3))
config['mink2'] = max(1, min(min(min_k2, 30//max_shape), 3))
config['maxk1'] = max(1, min(min(max_k1, 30//max_shape), 3))
config['maxk2'] = max(1, min(min(max_k2, 30//max_shape), 3))
config['allow_make_smaller'] = False
config['allow_make_bigger'] = False
for uni_task in task['train']:
if uni_task['input'].shape[0] > uni_task['output'].shape[0] or \
uni_task['input'].shape[1] > uni_task['output'].shape[1]:
config['allow_make_smaller'] = True
if uni_task['input'].shape[0] < uni_task['output'].shape[0] or \
uni_task['input'].shape[1] < uni_task['output'].shape[1]:
config['allow_make_bigger'] = True
colors_out = []
changed_colors = []
inp_colors = []
for uni_task in task['train']:
inp = uni_task['input']
out = uni_task['output']
for i in range(min(inp.shape[0], out.shape[0])):
for j in range(min(inp.shape[1], out.shape[1])):
inp_colors.append(inp[i, j])
if out[i, j] != inp[i, j]:
colors_out.append(out[i, j])
changed_colors.append(inp[i, j])
inp_colors = np.unique(inp_colors)
changed_colors = np.unique(changed_colors)
config['ignore_colors'] = [c for c in inp_colors if not c in changed_colors]
config['possible_ignore_colors'] = np.array([c for c in all_colors if not c in config['ignore_colors']])
if len(colors_out) == 0:
colors_out = [0]
config['possible_colors_out'] = np.unique(colors_out)
return all_colors, config
def compute_parametrized_automata(input, hidden_i, rules):
output = np.zeros_like(input, dtype=int)
hidden_o = np.copy(hidden_i)
for i, j in product(range(input.shape[0]), range(input.shape[1])):
i_c = input[i, j]
i_nbh = nbh(input, i, j)
# cells adjacent to the current one
i_direct_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 0), (-1, 0), (0, 1), (0, -1)}}
i_indirect_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (-1, -1), (-1, 1), (1, -1)}}
is_top_b, is_bottom_b = i == 0, i == input.shape[0] - 1
is_left_b, is_right_b = j == 0, j == input.shape[1] - 1
is_b = is_top_b or is_bottom_b or is_left_b or is_right_b
if i_c > 0:
output[i, j] = i_c
for rule in rules:
if i_c in rule['ignore_colors']:
continue
if rule['type'] == 'copy_color_by_direction':
if rule['direction'] == 'bottom' or rule['direction'] == 'everywhere':
if not is_top_b and input[i - 1, j] in rule['copy_color'] and \
(i == 1 or input[i - 2, j] == rule['look_back_color']):
output[i, j] = input[i - 1, j]
break
if rule['direction'] == 'top' or rule['direction'] == 'everywhere':
if not is_bottom_b and input[i + 1, j] in rule['copy_color'] and \
(i == input.shape[0] - 2 or input[i + 2, j] == rule['look_back_color']):
output[i, j] = input[i + 1, j]
break
if rule['direction'] == 'right' or rule['direction'] == 'everywhere':
if not is_left_b and input[i, j - 1] in rule['copy_color'] and \
(j == 1 or input[i, j - 2] == rule['look_back_color']):
output[i, j] = input[i, j - 1]
break
if rule['direction'] == 'left' or rule['direction'] == 'everywhere':
if not is_right_b and input[i, j + 1] in rule['copy_color'] and \
(j == input.shape[1] - 2 or input[i, j + 2] == rule['look_back_color']):
output[i, j] = input[i, j + 1]
break
elif rule['type'] == 'corner_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = 3
out_nbh = rule['nbh_check_out']
i_uplecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, -1), (-1, 0), (0, -1)}}
i_upricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(-1, 1), (-1, 0), (0, 1)}}
i_dolecorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, -1), (1, 0), (0, -1)}}
i_doricorner_nbh = {k: v for k, v in i_nbh.items() if k in {(1, 1), (1, 0), (0, 1)}}
if sum(1 for v in i_nbh.values() if v in color_nbh) < 3:
continue
did_something = False
for corner_idx in [i_uplecorner_nbh, i_upricorner_nbh, i_dolecorner_nbh, i_doricorner_nbh]:
for color in color_nbh:
if sum(1 for v in corner_idx.values() if v == color) == sum_nbh:
output[i, j] = out_nbh
did_something = True
break
if did_something:
break
if did_something:
break
elif rule['type'] == 'nbh_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'direct_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_direct_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'indirect_check':
color_nbh = rule['nbh_check_colors']
sum_nbh = rule['nbh_check_sum']
out_nbh = rule['nbh_check_out']
proper_nbhs = i_indirect_nbh.values()
if sum(1 for v in proper_nbhs if v in color_nbh) > sum_nbh:
output[i, j] = out_nbh
break
elif rule['type'] == 'color_distribution':
directions = ['top', 'bottom', 'left', 'right', 'top_left', 'bottom_left', 'top_right', 'bottom_right']
not_border_conditions = \
[
not is_top_b,
not is_bottom_b,
not is_left_b,
not is_right_b,
not is_top_b and not is_left_b,
not is_bottom_b and not is_left_b,
not is_top_b and not is_right_b,
not is_bottom_b and not is_right_b
]
index_from = \
[
(i - 1, j),
(i + 1, j),
(i, j - 1),
(i, j + 1),
(i - 1, j - 1),
(i + 1, j - 1),
(i - 1, j + 1),
(i + 1, j + 1)
]
did_something = False
for i_dir, direction in enumerate(directions):
if rule['direction'] == direction:
if not_border_conditions[i_dir]:
if (rule['check_in_empty'] == 1 and input[index_from[i_dir]] > 0) or \
(rule['check_in_empty'] == 0 and input[index_from[i_dir]] == rule['color_in']):
output[i, j] = rule['color_out']
did_something = True
break
if did_something:
break
return output, hidden_o
def get_connectivity_info(color: np.array, ignore_black = False, von_neumann_only = False, edge_for_difcolors = False):
# UnionFind structure allows us to detect all connected areas in a linear time.
class UnionFind:
def __init__(self) -> None:
self.area = np.ones(color.size)
self.parent = np.arange(color.size)
def find(self, x: int) -> int:
if self.parent[x] != x:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, u: int, v: int) -> None:
root_u, root_v = self.find(u), self.find(v)
if root_u != root_v:
area_u, area_v = self.area[root_u], self.area[root_v]
if area_u < area_v:
root_u, root_v = root_v, root_u
self.parent[root_v] = root_u
self.area[root_u] = area_u + area_v
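# Usage sketch: cells are indexed by their flattened position i*ncols + j.
# union(a, b) merges the components containing a and b; afterwards
# find(a) == find(b) iff a and b are connected, and area[find(a)] gives the
# size of a's component.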
union_find = UnionFind()
neighbours = [[-1, 0], [0, -1], [1, 0], [0, 1]]
if not von_neumann_only:
neighbours.extend([[-1, -1], [1, -1], [1, 1], [-1, 1]])
nrows, ncols = color.shape
for i in range(nrows):
for j in range(ncols):
for s, t in neighbours:
u, v = i + s, j + t
if u >= 0 and u < nrows and v >= 0 and v < ncols and \
(color[u, v] == color[i, j] or (edge_for_difcolors | |
mutually
exclusive. If no offset-related params are provided, the offset
defaults to 0.
:param Query query_set: Existing queryset. If provided, all queries
are applied to it instead of creating new queryset. Defaults
to None.
:param _count: When provided, only results number is returned as
integer.
:param _explain: When provided, query performed(SQL) is returned
as a string instead of query results.
:param bool _raise_on_empty: When True JHTTPNotFound is raised
if query returned no results. Defaults to False in which case
error is just logged and empty query results are returned.
:returns: Query results as ``sqlalchemy.orm.query.Query`` instance.
May be sorted, offset, limited.
:returns: Dict of {'field_name': fieldval}, when ``_fields`` param
is provided.
:returns: Number of query results as an int when ``_count`` param
is provided.
:returns: String representing query ran when ``_explain`` param
is provided.
:raises JHTTPNotFound: When ``_raise_on_empty=True`` and no
results found.
:raises JHTTPNotFound: When ``_item_request=True`` and
``sqlalchemy.exc.DataError`` exception is raised during DB
query. Latter exception is raised when querying DB with
an identifier of a wrong type. E.g. when querying Int field
with a string.
:raises JHTTPBadRequest: When ``_item_request=False`` and
``sqlalchemy.exc.DataError`` exception is raised during DB
query.
:raises JHTTPBadRequest: When ``sqlalchemy.exc.InvalidRequestError``
or ``sqlalchemy.exc.IntegrityError`` errors happen during DB
query.
"""
log.debug('Get collection: {}, {}'.format(cls.__name__, params))
params.pop('__confirmation', False)
_strict = params.pop('_strict', True)
_item_request = params.pop('_item_request', False)
_sort = _split(params.pop('_sort', []))
_fields = _split(params.pop('_fields', []))
_limit = params.pop('_limit', None)
_page = params.pop('_page', None)
_start = params.pop('_start', None)
query_set = params.pop('query_set', None)
_count = '_count' in params
params.pop('_count', None)
_explain = '_explain' in params
params.pop('_explain', None)
_raise_on_empty = params.pop('_raise_on_empty', False)
if query_set is None:
query_set = Session().query(cls)
# Remove any __ legacy instructions from this point on
params = dictset({
key: val for key, val in params.items()
if not key.startswith('__')
})
iterables_exprs, params = cls._pop_iterables(params)
params = drop_reserved_params(params)
if _strict:
_check_fields = [
f.strip('-+') for f in list(params.keys()) + _fields + _sort]
cls.check_fields_allowed(_check_fields)
else:
params = cls.filter_fields(params)
process_lists(params)
process_bools(params)
# If param is _all then remove it
params.pop_by_values('_all')
try:
query_set = query_set.filter_by(**params)
# Apply filtering by iterable expressions
for expr in iterables_exprs:
query_set = query_set.from_self().filter(expr)
_total = query_set.count()
if _count:
return _total
# Filtering by fields has to be the first thing to do on
# the query_set!
query_set = cls.apply_fields(query_set, _fields)
query_set = cls.apply_sort(query_set, _sort)
if _limit is not None:
_start, _limit = process_limit(_start, _page, _limit)
query_set = query_set.offset(_start).limit(_limit)
if not query_set.count():
msg = "'%s(%s)' resource not found" % (cls.__name__, params)
if _raise_on_empty:
raise JHTTPNotFound(msg)
else:
log.debug(msg)
except DataError as ex:
if _item_request:
msg = "'{}({})' resource not found".format(
cls.__name__, params)
raise JHTTPNotFound(msg, explanation=ex.message)
else:
raise JHTTPBadRequest(str(ex), extra={'data': ex})
except (InvalidRequestError,) as ex:
raise JHTTPBadRequest(str(ex), extra={'data': ex})
query_sql = str(query_set).replace('\n', '')
if _explain:
return query_sql
log.debug('get_collection.query_set: %s (%s)', cls.__name__, query_sql)
if _fields:
query_set = cls.add_field_names(query_set, _fields)
query_set._nefertari_meta = dict(
total=_total,
start=_start,
fields=_fields)
return query_set
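# Usage sketch (hypothetical 'Story' model built on this mixin):
#
#   stories = Story.get_collection(
#       _limit=20, _page=0, _sort='-updated_at',
#       _fields='id,title', status='published')
#
# Passing _count returns an int instead of a queryset; passing _explain
# returns the generated SQL as a string.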
@classmethod
def add_field_names(cls, query_set, requested_fields):
""" Convert list of tuples to dict with proper field keys. """
from .utils import FieldsQuerySet
fields = [col['name'] for col in query_set.column_descriptions] + [
'_type']
add_vals = (cls.__name__,)
pk_field = cls.pk_field()
def _convert(val):
return dict(zip(fields, val+add_vals))
def _add_pk(obj):
if pk_field in obj:
obj['_pk'] = obj[pk_field]
if pk_field not in requested_fields:
obj.pop(pk_field)
return obj
values = query_set.all()
converted = [_add_pk(_convert(val)) for val in values]
return FieldsQuerySet(converted)
@classmethod
def has_field(cls, field):
return field in cls.native_fields()
@classmethod
def native_fields(cls):
columns = list(cls._mapped_columns().keys())
relationships = list(cls._mapped_relationships().keys())
return columns + relationships
@classmethod
def _mapped_columns(cls):
return {c.name: c for c in class_mapper(cls).columns}
@classmethod
def _mapped_relationships(cls):
return {c.key: c for c in class_mapper(cls).relationships}
@classmethod
def fields_to_query(cls):
query_fields = [
'id', '_limit', '_page', '_sort', '_fields', '_count', '_start']
return list(set(query_fields + cls.native_fields()))
@classmethod
def get_item(cls, **params):
""" Get single item and raise exception if not found.
Exception raising when item is not found can be disabled
by passing ``_raise_on_empty=False`` in params.
:returns: Single collection item as an instance of ``cls``.
"""
params.setdefault('_raise_on_empty', True)
params['_limit'] = 1
params['_item_request'] = True
query_set = cls.get_collection(**params)
return query_set.first()
def unique_fields(self):
native_fields = class_mapper(self.__class__).columns
return [f for f in native_fields if f.unique or f.primary_key]
@classmethod
def get_or_create(cls, **params):
defaults = params.pop('defaults', {})
_limit = params.pop('_limit', 1)
query_set = cls.get_collection(_limit=_limit, **params)
try:
obj = query_set.one()
return obj, False
except NoResultFound:
defaults.update(params)
new_obj = cls(**defaults)
new_obj.save()
return new_obj, True
except MultipleResultsFound:
raise JHTTPBadRequest('Bad or Insufficient Params')
def _update(self, params, **kw):
process_bools(params)
self.check_fields_allowed(list(params.keys()))
columns = {c.name: c for c in class_mapper(self.__class__).columns}
iter_columns = set(
k for k, v in columns.items()
if isinstance(v, (DictField, ListField)))
pk_field = self.pk_field()
for key, new_value in params.items():
# Can't change PK field
if key == pk_field:
continue
if key in iter_columns:
self.update_iterables(new_value, key, unique=True, save=False)
else:
setattr(self, key, new_value)
return self
@classmethod
def _delete_many(cls, items, request=None,
synchronize_session=False):
""" Delete :items: queryset or objects list.
When queryset passed, Query.delete() is used to delete it but
first queryset is re-queried to clean it from explicit
limit/offset/etc.
If some of the methods listed above were called, or :items: is not
a Query instance, items are deleted one by one.
`on_bulk_delete` function is called to delete objects from index
and to reindex relationships. This is done explicitly because it is
impossible to get access to deleted objects in signal handler for
'after_bulk_delete' ORM event.
"""
if isinstance(items, Query):
del_queryset = cls._clean_queryset(items)
del_items = del_queryset.all()
del_count = del_queryset.delete(
synchronize_session=synchronize_session)
on_bulk_delete(cls, del_items, request)
return del_count
items_count = len(items)
session = Session()
for item in items:
item._request = request
session.delete(item)
session.flush()
return items_count
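# Usage sketch (hypothetical 'Story' model): both call styles are supported.
#
#   Story._delete_many(Story.get_collection(status='draft'), request)  # bulk
#   Story._delete_many([story_a, story_b], request)                    # per item
#
# The first form deletes through Query.delete(); the second deletes each
# object through the session, one at a time.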
@classmethod
def _update_many(cls, items, params, request=None,
synchronize_session='fetch'):
""" Update :items: queryset or objects list.
When queryset passed, Query.update() is used to update it but
first queryset is re-queried to clean it from explicit
limit/offset/etc.
If some of the methods listed above were called, or :items: is not
a Query instance, one-by-one items update is performed.
"""
if isinstance(items, Query):
upd_queryset = cls._clean_queryset(items)
upd_queryset._request = request
upd_count = upd_queryset.update(
params, synchronize_session=synchronize_session)
return upd_count
items_count = len(items)
for item in items:
item.update(params, request)
return items_count
@classmethod
def _clean_queryset(cls, queryset):
""" Clean :queryset: from explicit limit, offset, etc.
New queryset is created by querying collection by IDs from
passed queryset.
"""
pk_field = getattr(cls, cls.pk_field())
pks_query = queryset.with_entities(pk_field)
return queryset.session.query(cls).filter(
pk_field.in_(pks_query))
def __repr__(self):
pk_field = self.pk_field()
parts = [
'{}={}'.format(pk_field, getattr(self, pk_field)),
]
return '<{}: {}>'.format(self.__class__.__name__, ', '.join(parts))
@classmethod
def get_by_ids(cls, ids, **params):
query_set = cls.get_collection(**params)
cls_id = getattr(cls, cls.pk_field())
return query_set.from_self().filter(cls_id.in_(ids)).limit(len(ids))
@classmethod
def get_null_values(cls):
""" Get null values of :cls: fields. """
skip_fields = set(['_acl'])
null_values = {}
columns = cls._mapped_columns()
columns.update(cls._mapped_relationships())
for name, col in columns.items():
if name in skip_fields:
continue
if isinstance(col, RelationshipProperty) and col.uselist:
value = []
else:
value = None
null_values[name] = value
return null_values
def to_dict(self, **kwargs):
_depth = kwargs.get('_depth')
if _depth is None:
_depth = self._nesting_depth
depth_reached = _depth is not None and _depth <= 0
_data = dictset()
native_fields = self.__class__.native_fields()
for field in native_fields:
value = getattr(self, field, None)
include = field in self._nested_relationships
if not include or depth_reached:
encoder = lambda v: getattr(v, v.pk_field(), None)
else:
encoder = lambda v: v.to_dict(_depth=_depth-1)
if isinstance(value, BaseMixin):
value = encoder(value)
elif isinstance(value, InstrumentedList):
value = [encoder(val) for val in value]
elif hasattr(value, 'to_dict'):
value = value.to_dict(_depth=_depth-1)
_data[field] = value
_data['_type'] = self._type
_data['_pk'] = str(getattr(self, self.pk_field()))
return _data
def update_iterables(self, params, attr, unique=False,
value_type=None, save=True,
request=None):
self._request = request
mapper = class_mapper(self.__class__)
columns = {c.name: c for c in mapper.columns}
is_dict = isinstance(columns.get(attr), DictField)
is_list = isinstance(columns.get(attr), ListField)
def split_keys(keys):
neg_keys, pos_keys = [], []
for key in keys:
if key.startswith('__'):
continue
if key.startswith('-'):
neg_keys.append(key[1:])
else:
pos_keys.append(key.strip())
return pos_keys, neg_keys
def update_dict(update_params):
final_value = getattr(self, attr, {}) or {}
final_value = final_value.copy()
if update_params is None or update_params == '':
if not final_value:
return
update_params = {
'-' + key: val for key, val in final_value.items()}
positive, negative = split_keys(list(update_params.keys()))
# Pop negative keys
for key in negative:
final_value.pop(key, None)
# Set positive keys
for key in | |
False
def export(self, write, level, namespace_='maecPackage:', name_='ObjectEquivalenceType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='ObjectEquivalenceType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='ObjectEquivalenceType'):
super(ObjectEquivalenceType, self).exportAttributes(write, level, already_processed, namespace_, name_='ObjectEquivalenceType')
if self.id is not None and 'id' not in already_processed:
already_processed.add('id')
write(' id=%s' % (quote_attrib(self.id), ))
def exportChildren(self, write, level, namespace_='maecPackage:', name_='ObjectEquivalenceType', fromsubclass_=False, pretty_print=True):
super(ObjectEquivalenceType, self).exportChildren(write, level, 'maecPackage:', name_, True, pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('id', node)
if value is not None and 'id' not in already_processed:
already_processed.add('id')
self.id = value
super(ObjectEquivalenceType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(ObjectEquivalenceType, self).buildChildren(child_, node, nodeName_, True)
pass
# end class ObjectEquivalenceType
class HypervisorHostSystemType(system_object.SystemObjectType):
"""The HypervisorHostSystemType characterizes the VM Hypervisor host
system used in the malware analysis environment."""
subclass = None
superclass = system_object.SystemObjectType
def __init__(self, object_reference=None, Custom_Properties=None, Available_Physical_Memory=None, BIOS_Info=None, Date=None, Hostname=None, Local_Time=None, Network_Interface_List=None, OS=None, Processor=None, Processor_Architecture=None, System_Time=None, Timezone_DST=None, Timezone_Standard=None, Total_Physical_Memory=None, Uptime=None, Username=None, VM_Hypervisor=None):
super(HypervisorHostSystemType, self).__init__(object_reference, Custom_Properties, Available_Physical_Memory, BIOS_Info, Date, Hostname, Local_Time, Network_Interface_List, OS, Processor, Processor_Architecture, System_Time, Timezone_DST, Timezone_Standard, Total_Physical_Memory, Uptime, Username, )
self.VM_Hypervisor = VM_Hypervisor
def factory(*args_, **kwargs_):
if HypervisorHostSystemType.subclass:
return HypervisorHostSystemType.subclass(*args_, **kwargs_)
else:
return HypervisorHostSystemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_VM_Hypervisor(self): return self.VM_Hypervisor
def set_VM_Hypervisor(self, VM_Hypervisor): self.VM_Hypervisor = VM_Hypervisor
def hasContent_(self):
if (
self.VM_Hypervisor is not None or
super(HypervisorHostSystemType, self).hasContent_()
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='HypervisorHostSystemType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='HypervisorHostSystemType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='HypervisorHostSystemType'):
super(HypervisorHostSystemType, self).exportAttributes(write, level, already_processed, namespace_, name_='HypervisorHostSystemType')
def exportChildren(self, write, level, namespace_='maecPackage:', name_='HypervisorHostSystemType', fromsubclass_=False, pretty_print=True):
super(HypervisorHostSystemType, self).exportChildren(write, level, 'maecPackage:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.VM_Hypervisor is not None:
self.VM_Hypervisor.export(write, level, 'maecPackage:', name_='VM_Hypervisor', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(HypervisorHostSystemType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'VM_Hypervisor':
obj_ = cybox_common.PlatformSpecificationType.factory()
obj_.build(child_)
self.set_VM_Hypervisor(obj_)
super(HypervisorHostSystemType, self).buildChildren(child_, node, nodeName_, True)
# end class HypervisorHostSystemType
class AnalysisSystemType(system_object.SystemObjectType):
"""The AnalysisSystemType is intended to characterize any systems on
which malware analysis is performed. It imports and extends
version 1.3 of the CybOX System Object."""
subclass = None
superclass = system_object.SystemObjectType
def __init__(self, object_reference=None, Custom_Properties=None, Available_Physical_Memory=None, BIOS_Info=None, Date=None, Hostname=None, Local_Time=None, Network_Interface_List=None, OS=None, Processor=None, Processor_Architecture=None, System_Time=None, Timezone_DST=None, Timezone_Standard=None, Total_Physical_Memory=None, Uptime=None, Username=None, Installed_Programs=None):
super(AnalysisSystemType, self).__init__(object_reference, Custom_Properties, Available_Physical_Memory, BIOS_Info, Date, Hostname, Local_Time, Network_Interface_List, OS, Processor, Processor_Architecture, System_Time, Timezone_DST, Timezone_Standard, Total_Physical_Memory, Uptime, Username, )
self.Installed_Programs = Installed_Programs
def factory(*args_, **kwargs_):
if AnalysisSystemType.subclass:
return AnalysisSystemType.subclass(*args_, **kwargs_)
else:
return AnalysisSystemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Installed_Programs(self): return self.Installed_Programs
def set_Installed_Programs(self, Installed_Programs): self.Installed_Programs = Installed_Programs
def hasContent_(self):
if (
self.Installed_Programs is not None or
super(AnalysisSystemType, self).hasContent_()
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='AnalysisSystemType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='AnalysisSystemType')
if self.hasContent_():
write('>%s' % (eol_, ))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
showIndent(write, level, pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='AnalysisSystemType'):
super(AnalysisSystemType, self).exportAttributes(write, level, already_processed, namespace_, name_='AnalysisSystemType')
def exportChildren(self, write, level, namespace_='maecPackage:', name_='AnalysisSystemType', fromsubclass_=False, pretty_print=True):
super(AnalysisSystemType, self).exportChildren(write, level, 'maecPackage:', name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.Installed_Programs is not None:
self.Installed_Programs.export(write, level, 'maecPackage:', name_='Installed_Programs', pretty_print=pretty_print)
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(AnalysisSystemType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Installed_Programs':
obj_ = InstalledProgramsType.factory()
obj_.build(child_)
self.set_Installed_Programs(obj_)
super(AnalysisSystemType, self).buildChildren(child_, node, nodeName_, True)
# end class AnalysisSystemType
class CommentType(cybox_common.StructuredTextType):
"""The CommentType captures a comment relating to some MAEC entity.The
author field specifies the name of the author that added the
comment.The timestamp field specifies the date/time that the
comment was added."""
subclass = None
superclass = cybox_common.StructuredTextType
def __init__(self, structuring_format=None, timestamp=None, author=None, observation_name=None, valueOf_=None):
super(CommentType, self).__init__(structuring_format, valueOf_, )
self.timestamp = _cast(None, timestamp)
self.author = _cast(None, author)
self.observation_name = observation_name
self.valueOf_ = valueOf_
def factory(*args_, **kwargs_):
if CommentType.subclass:
return CommentType.subclass(*args_, **kwargs_)
else:
return CommentType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_timestamp(self): return self.timestamp
def set_timestamp(self, timestamp): self.timestamp = timestamp
def get_author(self): return self.author
def set_author(self, author): self.author = author
def get_observation_name(self): return self.observation_name
def set_observation_name(self, observation_name): self.observation_name = observation_name
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def hasContent_(self):
if (
self.valueOf_ or
super(CommentType, self).hasContent_()
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='CommentType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(write, level, pretty_print)
write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(write, level, already_processed, namespace_, name_='CommentType')
if self.hasContent_():
write('>')
write(quote_xml(self.valueOf_))
self.exportChildren(write, level + 1, namespace_, name_, pretty_print=pretty_print)
write('</%s%s>%s' % (namespace_, name_, eol_))
else:
write('/>%s' % (eol_, ))
def exportAttributes(self, write, level, already_processed, namespace_='maecPackage:', name_='CommentType'):
super(CommentType, self).exportAttributes(write, level, already_processed, namespace_, name_='CommentType')
if self.timestamp is not None and 'timestamp' not in already_processed:
already_processed.add('timestamp')
write(' timestamp="%s"' % self.gds_format_datetime(self.timestamp, input_name='timestamp'))
if self.author is not None and 'author' not in already_processed:
already_processed.add('author')
write(' author=%s' % (quote_attrib(self.author)))
if self.observation_name is not None and 'observation_name' not in already_processed:
already_processed.add('observation_name')
write(' observation_name=%s' % (quote_attrib(self.observation_name)))
def exportChildren(self, write, level, namespace_='maecPackage:', name_='CommentType', fromsubclass_=False, pretty_print=True):
super(CommentType, self).exportChildren(write, level, 'maecPackage:', name_, True, pretty_print=pretty_print)
pass
def build(self, node):
self.__sourcenode__ = node
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
self.valueOf_ = get_all_text_(node)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('timestamp', node)
if value is not None and 'timestamp' not in already_processed:
already_processed.add('timestamp')
try:
self.timestamp = value
except ValueError as exp:
raise ValueError('Bad date-time attribute (timestamp): %s' % exp)
value = find_attr_value_('author', node)
if value is not None and 'author' not in already_processed:
already_processed.add('author')
self.author = value
value = find_attr_value_('observation_name', node)
if value is not None and 'observation_name' not in already_processed:
already_processed.add('observation_name')
self.observation_name = value
super(CommentType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
pass
# end class CommentType
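# Usage sketch: export() writes through any callable that accepts strings,
# e.g. io.StringIO.write.
#
#   import io
#   buf = io.StringIO()
#   comment = CommentType(author='analyst', valueOf_='packed with UPX')
#   comment.export(buf.write, 0, name_='Comment')
#   xml_text = buf.getvalue()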
class MalwareExceptionType(cybox_common.ErrorType):
"""The MalwareExceptionType captures details of exceptions that may be
raised as a result of a malware instance executing on a
system.The is_fatal field specifies whether the exception is
fatal; that is, whether it caused the malware instance to
terminate."""
subclass = None
superclass = cybox_common.ErrorType
def __init__(self, is_fatal=None, Error_Type=None, Error_Count=None, Error_Instances=None, Exception_Code=None, Faulting_Address=None, Description=None):
super(MalwareExceptionType, self).__init__(Error_Type=Error_Type, Error_Count=Error_Count, Error_Instances=Error_Instances)
self.is_fatal = _cast(bool, is_fatal)
self.Exception_Code = Exception_Code
self.Faulting_Address = Faulting_Address
self.Description = Description
def factory(*args_, **kwargs_):
if MalwareExceptionType.subclass:
return MalwareExceptionType.subclass(*args_, **kwargs_)
else:
return MalwareExceptionType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Exception_Code(self): return self.Exception_Code
def set_Exception_Code(self, Exception_Code): self.Exception_Code = Exception_Code
def get_Faulting_Address(self): return self.Faulting_Address
def set_Faulting_Address(self, Faulting_Address): self.Faulting_Address = Faulting_Address
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_is_fatal(self): return self.is_fatal
def set_is_fatal(self, is_fatal): self.is_fatal = is_fatal
def hasContent_(self):
if (
self.Exception_Code is not None or
self.Faulting_Address is not None or
self.Description is not None
):
return True
else:
return False
def export(self, write, level, namespace_='maecPackage:', name_='MalwareExceptionType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ | |
<filename>storm_control/hal4000/settings/settings.py
#!/usr/bin/env python
"""
Handles all interaction / communication with the parameters widget.
This widget is responsible for keeping track of the various
different parameter files that the user has loaded as well as
editting and saving these parameters.
Unlike in Python2/PyQt4 HAL there is no longer a single current
parameter object that is shared across all the modules.
The 'parameters of record' are those that are stored by each
module, though they are expected to match this modules
parameters.
In general, modules should update their parameters with the
values from these parameters when settings are changed. Modules
should try not to use these parameters directly as their parameters,
though this will sometimes be unavoidable, e.g. with the feeds.
This is because at least some of these parameters will come directly
from an XML file and may not have the complete type information,
for example they will just be ParameterInt when the module might
be expecting to work with a ParameterRangeInt.
Hazen 03/17
"""
import copy
import os
from PyQt5 import QtWidgets
import storm_control.sc_library.halExceptions as halExceptions
import storm_control.sc_library.parameters as params
import storm_control.hal4000.halLib.halMessage as halMessage
import storm_control.hal4000.halLib.halMessageBox as halMessageBox
import storm_control.hal4000.halLib.halModule as halModule
import storm_control.hal4000.settings.parametersBox as parametersBox
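# Hedged sketch, not part of HAL: how another module is expected to answer the
# 'new parameters' message described in the module docstring and in the comments
# inside Settings.__init__ below. 'self.parameters' and 'example_setting' are
# made up; only the message/response calls mirror the ones used in this file.
def _example_new_parameters_handler(self, message):
    if message.isType("new parameters"):
        wanted = message.getData()["parameters"]
        # 1. Report a copy of the last known good values.
        message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
                                                          data = {"old parameters" : self.parameters.copy()}))
        # 2. Adopt the requested value(s), then report the updated parameters.
        self.parameters.set("example_setting", wanted.get("example_setting"))
        message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
                                                          data = {"new parameters" : self.parameters}))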
class Settings(halModule.HalModule):
def __init__(self, module_params = None, qt_settings = None, **kwds):
super().__init__(**kwds)
self.locked_out = False
self.wait_for = []
self.waiting_on = []
self.view = parametersBox.ParametersBox(module_params = module_params,
qt_settings = qt_settings)
self.view.editParameters.connect(self.handleEditParameters)
self.view.newParameters.connect(self.handleNewParameters)
p = params.StormXMLObject()
p.set("parameters_file", os.path.join(module_params.get("directory"), "default.xml"))
#
# Add parameter to record whether or not these parameters have actually
# been used (as opposed to just appearing in the list view).
#
        # They should be initialized since this is what we are starting with.
#
p.add(params.ParameterSetBoolean(name = "initialized",
value = False,
is_mutable = False,
is_saved = False))
self.view.addParameters(p, is_default = True)
self.configure_dict = {"ui_order" : 0,
"ui_parent" : "hal.containerWidget",
"ui_widget" : self.view}
# This message marks the beginning and the end of the parameter change
# life cycle.
halMessage.addMessage("changing parameters",
validator = {"data" : {"changing" : [True, bool]},
"resp" : None})
# Other modules should respond to this message with their current
# parameters.
halMessage.addMessage("current parameters",
validator = {"data" : None,
"resp" : {"parameters" : [False, params.StormXMLObject]}})
# A request from another module for one of the sets of parameters.
halMessage.addMessage("get parameters",
validator = {"data" : {"index or name" : [True, (str, int)]},
"resp" : {"parameters" : [False, params.StormXMLObject],
"found" : [True, bool]}})
# The current parameters have changed.
#
# Data includes a copy of the desired new parameters. Other modules
# should at least check if the new parameters are okay. They may
# defer actually re-configuring until they receive the
# 'updated parameters' message.
#
        # Other modules that respond should send two responses:
        # 1. A response with a copy of their old parameters as "old parameters".
# 2. A response with their updated parameters as "new parameters".
#
# The response is structured this way so that if an error occurs
# during the parameter update we still have a record of the last
# good state in "old parameters".
#
# Notes:
# 1. We send a copy of the parameters in the listview, so if the
# module wants to it can just use these as the parameters without
# copying them again.
#
# 2. The 'old parameters' response should be a copy.
#
# 3. The 'new parameters' response does not need to be a copy.
#
halMessage.addMessage("new parameters",
validator = {"data" : {"parameters" : [True, params.StormXMLObject],
"is_edit" : [True, bool]},
"resp" : {"new parameters" : [False, params.StormXMLObject],
"old parameters" : [False, params.StormXMLObject]}})
# This comes from other modules that requested "wait for" at startup.
#
# Modules may respond with their new parameters here if they did not know
# the final values for the parameters at 'new parameters'. At this point
# however the modules cannot complain that the parameters they were given
# were invalid, this has to be done at 'new parameters'.
#
halMessage.addMessage("parameters changed",
validator = {"data" : {"new parameters" : [False, params.StormXMLObject]},
"resp" : None})
# A request from another module to set the current parameters.
halMessage.addMessage("set parameters",
validator = {"data" : {"index or name" : [True, (str, int)]},
"resp" : {"found" : [True, bool],
"current" : [True, bool]}})
# The updated parameters.
#
# These are the updated values of parameters of all of the modules.
# This is sent immediately after all of the modules respond to
# the 'new parameters' message.
#
# The parameter change cycle won't actually complete till all the
# modules that requested a wait send the "parameters changed" message.
#
halMessage.addMessage("updated parameters",
validator = {"data" : {"parameters" : [True, params.StormXMLObject]}})
def handleEditParameters(self):
"""
Send the 'current parameters' message.
Once all the modules have responded with their current parameters
we will start the editor.
"""
#
# FIXME: Not all modules are responding to this message. Not clear
# whether we should require a response or not. This will use
# possibly incorrect parameter values if there is no response,
# but not all modules have parameters.
#
self.sendMessage(halMessage.HalMessage(m_type = "current parameters"))
def handleError(self, message, m_error):
# We can hopefully handle all 'new parameters' errors by reverting
# to the previous good parameters. The actual reversion happens in
# handleResponses.
if message.isType("new parameters"):
return True
def handleNewParameters(self, parameters, is_edit):
"""
Sends the 'new parameters' message.
The updated parameters could be a modified form of the current parameters or
        they could be a different set. We use the is_edit flag to record which of these
two it is.
"""
if self.locked_out:
raise halExceptions.HalException("parameter change attempted while locked out.")
# Disable the UI so the user can't change the parameters again while we
# are processing the current change.
self.view.enableUI(False)
self.setLockout(True)
# is_edit means we are sending a modified version of the current parameters.
self.sendMessage(halMessage.HalMessage(m_type = "new parameters",
data = {"parameters" : parameters.copy(),
"is_edit" : is_edit}))
def handleResponses(self, message):
if message.isType("current parameters"):
# Update our copy of the current parameters.
for response in message.getResponses():
data = response.getData()
if "parameters" in data:
self.view.updateCurrentParameters(response.source,
data["parameters"].copy())
# Start the editor.
self.view.startParameterEditor()
elif message.isType("new parameters"):
# Check if we got any errors.
if message.hasErrors():
# Create a message box with the first error.
msg = "New Parameters:\n\n"
for m_error in message.getErrors():
msg += "Got an error from '" + m_error.source + "' of type '" + m_error.message + "'!\n\n"
msg += "Attempting to revert to the last known good parameters."
halMessageBox.halMessageBoxInfo(msg)
# Attempt reversion.
# Replace the 'bad' parameters with their previous 'good' values.
if message.getData()["is_edit"]:
for response in message.getResponses():
data = response.getData()
if "old parameters" in data:
self.view.updateCurrentParameters(response.source, data["old parameters"])
self.sendMessage(halMessage.HalMessage(m_type = "new parameters",
data = {"parameters" : self.view.getCurrentParameters(),
"is_edit" : True}))
# Otherwise set the current selection back to previous selection.
# This will automatically send a 'new parameters' message.
else:
self.view.revertSelection()
else:
#
# If this is in response to a 'new parameters' message triggered by
# the editor then we don't want to update the previous parameters.
#
is_edit = message.getData()["is_edit"]
if not is_edit:
for response in message.getResponses():
data = response.getData()
if "old parameters" in data:
self.view.updatePreviousParameters(response.source,
data["old parameters"])
for response in message.getResponses():
data = response.getData()
if "new parameters" in data:
self.view.updateCurrentParameters(response.source,
data["new parameters"].copy())
# Let modules, such as feeds.feeds know that all of the modules
# have updated their parameters.
self.waiting_on = copy.copy(self.wait_for)
self.sendMessage(halMessage.HalMessage(m_type = "updated parameters",
data = {"parameters" : self.view.getCurrentParameters().copy()}))
elif message.isType("updated parameters"):
# No waits requested, so the parameter change is complete
if (len(self.waiting_on) == 0):
self.updateComplete()
def processMessage(self, message):
if message.isType("configure1"):
self.newMessage.emit(halMessage.HalMessage(source = self,
m_type = "add to ui",
data = self.configure_dict))
elif message.isType("configure2"):
self.view.copyDefaultParameters()
self.view.markCurrentAsInitialized()
elif message.isType("get parameters"):
p = self.view.getParameters(message.getData()["index or name"])
if p is None:
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"found" : False}))
else:
message.addResponse(halMessage.HalMessageResponse(source = self.module_name,
data = {"parameters" : p,
"found" : True}))
        elif
from __future__ import (
absolute_import,
unicode_literals,
)
import collections
import random
import sys
import uuid
import attr
import six
from pysoa.client.expander import (
ExpansionConverter,
ExpansionSettings,
)
from pysoa.client.settings import PolymorphicClientSettings
from pysoa.common.metrics import TimerResolution
from pysoa.common.transport.exceptions import (
ConnectionError,
InvalidMessageError,
MessageReceiveError,
MessageReceiveTimeout,
MessageSendError,
MessageSendTimeout,
MessageTooLarge,
)
from pysoa.common.types import (
ActionRequest,
JobRequest,
JobResponse,
UnicodeKeysDict,
)
__all__ = (
'Client',
'ServiceHandler',
)
class ServiceHandler(object):
"""Does the low-level work of communicating with an individual service through its configured transport."""
def __init__(self, service_name, settings):
"""
:param service_name: The name of the service which this handler calls
:param settings: The client settings object for this service (and only this service)
"""
self.metrics = settings['metrics']['object'](**settings['metrics'].get('kwargs', {}))
with self.metrics.timer('client.transport.initialize', resolution=TimerResolution.MICROSECONDS):
self.transport = settings['transport']['object'](
service_name,
self.metrics,
**settings['transport'].get('kwargs', {})
)
with self.metrics.timer('client.middleware.initialize', resolution=TimerResolution.MICROSECONDS):
self.middleware = [
m['object'](**m.get('kwargs', {}))
for m in settings['middleware']
]
# Make sure the request counter starts at a random location to avoid clashing with other clients
# sharing the same connection
self.request_counter = random.randint(1, 1000000)
@staticmethod
def _make_middleware_stack(middleware, base):
"""
Given a list of in-order middleware callables `middleware`
and a base function `base`, chains them together so each middleware is
fed the function below, and returns the top level ready to call.
"""
for ware in reversed(middleware):
base = ware(base)
return base
def _base_send_request(self, request_id, meta, job_request, message_expiry_in_seconds=None):
with self.metrics.timer('client.send.excluding_middleware', resolution=TimerResolution.MICROSECONDS):
if isinstance(job_request, JobRequest):
job_request = attr.asdict(job_request, dict_factory=UnicodeKeysDict)
self.transport.send_request_message(request_id, meta, job_request, message_expiry_in_seconds)
def send_request(self, job_request, message_expiry_in_seconds=None):
"""
Send a JobRequest, and return a request ID.
The context and control_extra arguments may be used to include extra values in the
context and control headers, respectively.
:param job_request: The job request object to send
:type job_request: JobRequest
:param message_expiry_in_seconds: How soon the message will expire if not received by a server (defaults to
sixty seconds unless the settings are otherwise)
:type message_expiry_in_seconds: int
:return: The request ID
:rtype: int
:raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge
"""
request_id = self.request_counter
self.request_counter += 1
meta = {}
wrapper = self._make_middleware_stack(
[m.request for m in self.middleware],
self._base_send_request,
)
try:
with self.metrics.timer('client.send.including_middleware', resolution=TimerResolution.MICROSECONDS):
wrapper(request_id, meta, job_request, message_expiry_in_seconds)
return request_id
finally:
self.metrics.commit()
def _get_response(self, receive_timeout_in_seconds=None):
with self.metrics.timer('client.receive.excluding_middleware', resolution=TimerResolution.MICROSECONDS):
request_id, meta, message = self.transport.receive_response_message(receive_timeout_in_seconds)
if message is None:
return None, None
else:
return request_id, JobResponse(**message)
def get_all_responses(self, receive_timeout_in_seconds=None):
"""
Receive all available responses from the transport as a generator.
:param receive_timeout_in_seconds: How long to block without receiving a message before raising
`MessageReceiveTimeout` (defaults to five seconds unless the settings are
otherwise).
:type receive_timeout_in_seconds: int
:return: A generator that yields (request ID, job response)
:rtype: generator
:raise: ConnectionError, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, StopIteration
"""
wrapper = self._make_middleware_stack(
[m.response for m in self.middleware],
self._get_response,
)
try:
while True:
with self.metrics.timer('client.receive.including_middleware', resolution=TimerResolution.MICROSECONDS):
request_id, response = wrapper(receive_timeout_in_seconds)
if response is None:
break
yield request_id, response
finally:
self.metrics.commit()
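# Hedged illustration, not pysoa API: how _make_middleware_stack composes
# middleware. Each "middleware" below is just a callable that wraps the function
# beneath it; the names are invented for the example.
def _example_middleware_chaining():
    def base(value):
        return 'base({})'.format(value)
    def outer(next_func):
        return lambda value: 'outer({})'.format(next_func(value))
    def inner(next_func):
        return lambda value: 'inner({})'.format(next_func(value))
    stacked = ServiceHandler._make_middleware_stack([outer, inner], base)
    return stacked('x')  # 'outer(inner(base(x)))'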
class Client(object):
"""
The `Client` provides a simple interface for calling actions on services and supports both sequential and
parallel action invocation.
"""
settings_class = PolymorphicClientSettings
handler_class = ServiceHandler
def __init__(self, config, expansion_config=None, settings_class=None, context=None):
"""
:param config: The entire client configuration dict, whose keys are service names and values are settings dicts
abiding by the `PolymorphicClientSettings` schema
:type config: dict
:param expansion_config: The optional expansion configuration dict, if this client supports expansions, which
is a dict abiding by the `ExpansionSettings` schema
:type expansion_config: dict
:param settings_class: An optional settings schema enforcement class or callable to use, which overrides the
default of `PolymorphicClientSettings`
:type settings_class: union[class, callable]
:param context: An optional base request context that will be used for all requests this client instance sends
(individual calls can add to and override the values supplied in this context dict)
:type: dict
"""
if settings_class:
self.settings_class = settings_class
self.context = context or {}
self.handlers = {}
self.settings = {}
self.config = config or {}
for service_name, service_config in self.config.items():
self.settings[service_name] = self.settings_class(service_config)
if expansion_config:
expansion_settings = ExpansionSettings(expansion_config)
self.expansion_converter = ExpansionConverter(
type_routes=expansion_settings['type_routes'],
type_expansions=expansion_settings['type_expansions'],
)
class FutureResponse(object):
"""
A future representing a retrievable response after sending a request.
"""
DelayedException = collections.namedtuple('DelayedException', ['tp', 'value', 'tb'])
def __init__(self, get_response):
self._get_response = get_response
self._response = None
self._raise = None
def result(self, timeout=None):
"""
Obtain the result of this future response.
The first time you call this method on a given future response, it will block for a response and then
either return the response or raise any errors raised by the response. You can specify an optional timeout,
which will override any timeout specified in the client settings or when calling the request method. If a
timeout occurs, `MessageReceiveTimeout` will be raised. It will not be cached, and you can attempt to call
            this again, and those subsequent calls to `result` (or `exception`) will be treated like first-time calls
            until a response is returned or a non-timeout error is raised.
The subsequent times you call this method on a given future response after obtaining a non-timeout response,
any specified timeout will be ignored, and the cached response will be returned (or the cached exception
re-raised).
:param timeout: If specified, the client will block for at most this many seconds waiting for a response.
If not specified, but a timeout was specified when calling the request method, the client
will block for at most that many seconds waiting for a response. If neither this nor the
request method timeout are specified, the configured timeout setting (or default of 5
seconds) will be used.
:type timeout: int
:return: The response
:rtype: union[ActionResponse, JobResponse, list[union[ActionResponse, JobResponse]],
generator[union[ActionResponse, JobResponse]]]
"""
if self._raise:
if six.PY2:
six.reraise(tp=self._raise.tp, value=self._raise.value, tb=self._raise.tb)
else:
# We do it this way because six.reraise adds extra traceback items in Python 3
raise self._raise.value.with_traceback(self._raise.tb)
if self._response:
return self._response
try:
self._response = self._get_response(timeout)
return self._response
except MessageReceiveTimeout:
raise
except Exception:
self._raise = self.DelayedException(*sys.exc_info())
raise
def exception(self, timeout=None):
"""
Obtain the exception raised by the call, blocking if necessary, per the rules specified in the
documentation for `result`. If the call completed without raising an exception, `None` is returned. If a
timeout occurs, `MessageReceiveTimeout` will be raised (not returned).
:param timeout: If specified, the client will block for at most this many seconds waiting for a response.
If not specified, but a timeout was specified when calling the request method, the client
will block for at most that many seconds waiting for a response. If neither this nor the
request method timeout are specified, the configured timeout setting (or default of 5
seconds) will be used.
:type timeout: int
:return: The exception
:rtype: Exception
"""
if self.running():
try:
self.result(timeout)
return None
except MessageReceiveTimeout:
raise
except Exception as e:
return e
if self._raise:
return self._raise.value
return None
def running(self):
"""
Returns `True` if the response (or exception) has not yet been obtained, `False` otherwise.
:return: Whether the request is believed to still be running (this is updated only when `result` or
`exception` is called).
"""
return not self.done()
def done(self):
"""
Returns `False` if the response (or exception) has not yet been obtained, `True` otherwise.
:return: Whether the request is known to be done (this is updated only when `result` or `exception` is
called).
"""
return bool(self._response or self._raise)
# Exceptions
class ImproperlyConfigured(Exception):
pass
class InvalidExpansionKey(Exception):
pass
class JobError(Exception):
"""
Raised by `Client.call_***` methods when a job response contains one or more job errors. Stores a list of
`Error` objects, and has a string representation cleanly displaying the errors.
"""
def __init__(self, errors=None):
"""
:param errors: The list of all errors in this job, available as an `errors` property on the exception
instance.
:type errors: list[Error]
"""
self.errors = errors or []
def __repr__(self):
return self.__str__()
def __str__(self):
errors_string = '\n'.join([str(e) for e in self.errors])
return 'Error executing job:\n{}'.format(errors_string)
class CallActionError(Exception):
"""
Raised by `Client.call_***` methods when a job response contains one or more action errors. Stores a list of
`ActionResponse` objects, and has a string representation cleanly displaying the actions' errors.
"""
def __init__(self, actions=None):
"""
            :param actions: The list of all actions that have errors (not actions without
"x", y_name: "y"}).sort_values(by="x") for t in list(data["table"])]
l = list(data["label"])
s = list(data["speed"])
lt = zip(l, tables, s)
for m in lt:
setattr(m[1], "label", m[0])
setattr(m[1], "speed", m[2])
return tables
def df_to_plot2(data, x_name, y_name):
tables = [data[[x_name, y_name]].rename(columns={x_name: "x", y_name: "y"}).loc[data["codec"] == s].sort_values(by="x") for s in codecs]
lt = zip(codecs, tables)
for m in lt:
setattr(m[1], "label", codecs_short[m[0]])
return tables
#def composite_plot(data, xlabel, ylabel, savefile, xlim=None, ylim=None, log_inter=True, xlog=False, ylog=False, smooth=True, xlogscalar=False, ylogscalar=False, legend_loc=None, tikz_before=True):
#i1, ax1 = plt.subplots()
#if not (xlog or ylog):
#tikz_before = False
#if xlog:
#ax1.set_xscale('log')
#ax1.grid(True, which="both")
#if xlogscalar:
#ax1.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#else:
#ax1.set_xscale('linear')
#ax1.grid(True)
#if ylog:
#ax1.set_yscale('log')
#ax1.grid(True, which="both")
#if ylogscalar:
#ax1.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
#else:
#ax1.set_yscale('linear')
#ax1.grid(True)
#for table in data:
#if smooth:
#c = plt.scatter(table.x, table.y, label=table.label, marker="+")
#colr = c.get_facecolor()[0]
#if log_inter:
#lx = np.log(table.x)
#p = sc.interpolate.Akima1DInterpolator(lx, table.y)
#x_smooth = np.logspace(np.log10(min(table.x)), np.log10(max(table.x)), 200)
#else:
#lx = table.x
#p = sc.interpolate.Akima1DInterpolator(lx, table.y)
#x_smooth = np.linspace(min(table.x), max(table.x), 200)
#y_smooth = p(np.log(x_smooth))
#plt.plot(x_smooth, y_smooth, color=colr)
#else:
#plt.plot(table.x, table.y, label=table.label, marker="+")
#ax1.set(xlabel=xlabel, ylabel=ylabel)
#if legend_loc is None:
#ax1.legend()
#else:
#ax1.legend(loc=legend_loc)
#if xlim is True:
#ax1.set_xlim(left=table.x.min(), right=table.x.max())
#elif xlim is not None:
#ax1.set_xlim(left=xlim[0], right=xlim[1])
#if ylim is True:
#ax1.set_ylim(bottom=table.y.min(), top=table.y.max())
#elif ylim is not None:
#ax1.set_ylim(bottom=ylim[0], top=ylim[1])
#p = os.path.split(savefile)
#enc.create_dir(p[0] + '/svg/')
#enc.create_dir(p[0] + '/png/')
#enc.create_dir(p[0] + '/tex/')
#if tikz_before:
#tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
#plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
#plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
#if not tikz_before:
#tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
#plt.close(i1)
def composite_plot(data, xlabel, ylabel, savefile, xlim=None, ylim=None, log_inter=True, xlog=False, ylog=False, smooth=True, xlogscalar=False, ylogscalar=False, legend_loc=None, tikz_before=True):
plt.figure()
plt.axis()
if not (xlog or ylog):
tikz_before = False
if xlog:
plt.xscale('log')
plt.grid(True, which="both")
# if xlogscalar:
# plt.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
else:
plt.xscale('linear')
plt.grid(True)
if ylog:
plt.yscale('log')
plt.grid(True, which="both")
# if ylogscalar:
# plt.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
else:
plt.yscale('linear')
plt.grid(True)
for table in data:
if smooth:
c = plt.scatter(table.x, table.y, label=table.label, marker="+")
colr = c.get_facecolor()[0]
if log_inter:
lx = np.log(table.x)
p = sc.interpolate.Akima1DInterpolator(lx, table.y)
x_smooth = np.logspace(np.log10(min(table.x)), np.log10(max(table.x)), 200)
else:
lx = table.x
p = sc.interpolate.Akima1DInterpolator(lx, table.y)
x_smooth = np.linspace(min(table.x), max(table.x), 200)
y_smooth = p(np.log(x_smooth))
plt.plot(x_smooth, y_smooth, color=colr)
else:
plt.plot(table.x, table.y, label=table.label, marker="+")
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(loc=legend_loc)
if xlim is True:
plt.xlim(left=table.x.min(), right=table.x.max())
elif xlim is not None:
plt.xlim(left=xlim[0], right=xlim[1])
if ylim is True:
plt.ylim(bottom=table.y.min(), top=table.y.max())
elif ylim is not None:
plt.ylim(bottom=ylim[0], top=ylim[1])
p = os.path.split(savefile)
enc.create_dir(p[0] + '/svg/')
enc.create_dir(p[0] + '/png/')
enc.create_dir(p[0] + '/tex/')
if tikz_before:
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.savefig(f"{p[0] + '/svg/' + p[1]}.svg")
plt.savefig(f"{p[0] + '/png/' + p[1]}.png")
if not tikz_before:
tikzplotlib.save(f"{p[0] + '/tex/' + p[1]}.tex")
plt.close()
def df_to_latex_table(values, save_path):
pass
def calc_bj(mxy_o, mlegend_o, bd_metric_legend, bd_rate_legend):
mxy = mxy_o.copy()
mlegend = mlegend_o.copy()
xy1 = mxy[mlegend.index(BJ1_serie)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(BJ1_serie)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta(x1, y1, x, y, mode=0)
bd_rate = bj_delta(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
print(f"{l}: BD-{bd_metric_legend}: {bd_metric}%")
print(f"{l}: BD-{bd_rate_legend}: {bd_rate}%")
def formatter1(x):
    s = ('%1.2f' % x).replace(".", ",") + r"\,\%"
    return s
def formatter2(x):
    s = ('%1.2f' % x).replace(".", ",") + r"\%"
    if x > 0:
        s = r"\cellcolor{red!25}" + s
    elif x < 0:
        s = r"\cellcolor{green!25}" + s
    return s
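# Hedged examples of the LaTeX cell strings produced by the formatters above:
# decimal comma, escaped percent sign, and a red/green \cellcolor for
# positive/negative BD deltas.
def _example_formatters():
    return formatter1(3.14159), formatter2(-1.5)
    # ('3,14\\,\\%', '\\cellcolor{green!25}-1,50\\%')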
def calc_bj_cross_to_table(mxy_o, mlegend_o, bd_metric_legend, bd_rate_legend):
table_metric = pd.DataFrame(np.zeros((len(mlegend_o), len(mlegend_o))), columns=mlegend_o, index=mlegend_o)
table_rate = pd.DataFrame(np.zeros((len(mlegend_o), len(mlegend_o))), columns=mlegend_o, index=mlegend_o)
for mleg in mlegend_o:
mxy = mxy_o.copy()
mlegend = mlegend_o.copy()
xy1 = mxy[mlegend.index(mleg)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(mleg)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta(x1, y1, x, y, mode=0)
bd_rate = bj_delta(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
table_metric.loc[l, mleg] = bd_metric
table_rate.loc[l, mleg] = bd_rate
# print(table_metric.to_latex(float_format="%.2f", decimal=","))
# print(table_rate.to_latex(float_format="%.2f"))
return table_metric, table_rate
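# Hedged sketch of the input shape calc_bj_cross_to_table expects: one list of
# (bitrate, metric) points per codec plus matching legend labels. The numbers
# are made up; bj_delta and enc come from the imports in the truncated header
# of this file.
def _example_bd_cross_table():
    mxy = [
        [(1000, 38.1), (2000, 40.2), (4000, 42.0), (8000, 43.5)],  # codec A
        [(1000, 37.2), (2000, 39.5), (4000, 41.4), (8000, 43.0)],  # codec B
    ]
    mlegend = ["codecA", "codecB"]
    return calc_bj_cross_to_table(mxy, mlegend, "PSNR", "rate")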
'''
def calc_bj_akima(dftable, x_name, y_name, bd_metric_legend, bd_rate_legend):
xy1 = mxy[mlegend.index(BJ1_serie)]
t1 = zip(*xy1)
x1, y1 = [list(t1) for t1 in t1]
mxy.remove(xy1)
mlegend.remove(BJ1_serie)
i = enc.count()
for m in mxy:
t = zip(*m)
x, y = [list(t) for t in t]
bd_metric = bj_delta_akima(x1, y1, x, y, mode=0)
bd_rate = bj_delta_akima(x1, y1, x, y, mode=1)
l = mlegend[next(i)]
print(f"{l}: BD-{bd_metric_legend}: {bd_metric}%")
print(f"{l}: BD-{bd_rate_legend}: {bd_rate}%")
'''
def calc_bj_akima(data, x_name, y_name, bd_metric_legend, bd_rate_legend):
    # Completed from the calc_bj() pattern above; assumes `data` has 'table' and
    # 'label' columns (as in df_to_plot) and that BJ1_serie is the baseline label.
    tables = {t.label: t.table.rename(columns={x_name: "x", y_name: "y"}).sort_values(by="x")
              for t in data.itertuples()}
    base = tables.pop(BJ1_serie)
    x1, y1 = list(base.x), list(base.y)
    for label, table in tables.items():
        x, y = list(table.x), list(table.y)
        bd_metric = bj_delta_akima(x1, y1, x, y, mode=0)
        bd_rate = bj_delta_akima(x1, y1, x, y, mode=1)
        print(f"{label}: BD-{bd_metric_legend}: {bd_metric}%")
        print(f"{label}: BD-{bd_rate_legend}: {bd_rate}%")
def read_table_kcolv(logpath):
with open(logpath, "r") as f:
firstline = next(f).rstrip(" \n")
columns = []
for x in firstline.rsplit(" "):
columns.append(x.rsplit(":")[0])
r = range(len(columns))
table = pd.read_table(logpath, names=columns, usecols=list(r), sep=" ",
converters={k: lambda x: (x.rsplit(":")[1]) for k in r})
return table.apply(pd.to_numeric)
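# Hedged example of the "key:value" column format read_table_kcolv parses
# (it mirrors ffmpeg's psnr stats log, which PSNR_values below consumes);
# the sample numbers and the temporary path are made up.
def _example_read_table_kcolv(tmp_path="/tmp/psnr_example.log"):
    sample = ("n:1 mse_avg:0.83 mse_y:1.01 mse_u:0.45 mse_v:0.42 "
              "psnr_avg:48.93 psnr_y:48.07 psnr_u:51.60 psnr_v:51.91\n"
              "n:2 mse_avg:0.79 mse_y:0.96 mse_u:0.44 mse_v:0.41 "
              "psnr_avg:49.14 psnr_y:48.31 psnr_u:51.70 psnr_v:52.01\n")
    with open(tmp_path, "w") as f:
        f.write(sample)
    return read_table_kcolv(tmp_path)  # numeric DataFrame with columns n, mse_*, psnr_*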
class PSNR_values:
def __init__(self, logpath):
self.logpath = logpath
table = read_table_kcolv(self.logpath)
self.n = table.n
self.mse_avg = table.mse_avg
self.mse_y = table.mse_y
self.mse_u = table.mse_u
self.mse_v = table.mse_v
self.psnr_avg = table.psnr_avg
self.psnr_y = table.psnr_y
self.psnr_u = table.psnr_u
self.psnr_v = table.psnr_v
self.mse_avg_avg = np.average(self.mse_avg)
self.mse_y_avg = np.average(self.mse_y)
self.mse_u_avg = np.average(self.mse_u)
self.mse_v_avg = np.average(self.mse_v)
self.psnr_avg_avg = np.average(self.psnr_avg)
self.psnr_y_avg = np.average(self.psnr_y)
self.psnr_u_avg = np.average(self.psnr_u)
self.psnr_v_avg = np.average(self.psnr_v)
class SSIM_values:
def __init__(self, logpath):
self.logpath = logpath
names = ("n", "Y", "U", "V", "All", "unnorm")
table = pd.read_table(self.logpath, names=names, sep=" ",
converters={k: lambda x: (x.rsplit(":")[1]) for k in range(5)})
table.unnorm = table.unnorm.str.slice(start=1, stop=-1)
table = table.apply(pd.to_numeric)
self.n = table.n
self.Y = table.Y
self.U = table.U
self.V = table.V
self.All = table.All
self.unnorm = table.unnorm # unnorm = 10*log10(1-All)
self.Y_avg = np.average(self.Y)
self.U_avg = np.average(self.U)
self.V_avg = np.average(self.V)
self.All_avg = np.average(self.All)
self.unnorm_avg = np.average(self.unnorm)
class VMAF_values:
def __init__(self, logpath):
self.logpath = logpath
table = pd.read_table(logpath, sep=",")
table = table.loc[:, ~table.columns.str.contains('^Unnamed')]
self.table = table
self.vmaf_avg = table.vmaf.mean()
class Useage_values:
def __init__(self, logpath):
self.logpath = logpath
with open(logpath, "r") as log:
firstline = next(log)
self.row_names = firstline.rsplit(",")[0:-1]
table = pd.read_csv(self.logpath)
self.table = table
self.state_names = list(table.state.unique())
total_time = 0
total_cpu_time = 0
for state in [x for x in self.state_names if x not in encode_excluded_states]:
for row in self.row_names:
if row == "state":
pass
else:
arr = np.array(table[row][table.index[table['state'] == state]])
setattr(self, state + "_" + row, arr)
cpu_time_user = getattr(self, state + "_cpu_time_user")
cpu_time_user = np.append(np.array([0]), cpu_time_user)
cpu_time_system = getattr(self, state + "_cpu_time_system")
cpu_time_system = np.append(np.array([0]), cpu_time_system)
cpu_time_total = cpu_time_user + cpu_time_system
setattr(self, state + "_cpu_time_total", cpu_time_total)
cpu_time_diff = np.ediff1d(cpu_time_total)
time = np.append(np.array([0]), getattr(self, state + "_time"))
time_diff = np.ediff1d(time)
cpu_percent_calc = cpu_time_diff / time_diff
setattr(self, state + "_cpu_percent_calc", cpu_percent_calc)
total_time += time[-1]
total_cpu_time += cpu_time_total[-1]
self.total_time = total_time
self.total_cpu_time = total_cpu_time
cpu_time_diff = np.ediff1d(np.append(np.array([0]), np.array(table.cpu_time_user + table.cpu_time_system)))
time_diff = np.ediff1d(np.append(np.array([0]), np.array(table.time)))
cpu_time_int = np.sum(cpu_time_diff * time_diff)
self.cpu_usage_avg = cpu_time_int / total_time
self.max_RSS = self.table.RSS.max()
self.perc_RSS = self.table.RSS.quantile(0.9)
self.mean_RSS = self.table.RSS.mean()
self.med_RSS = self.table.RSS.median()
for row in self.row_names:
if row == "state":
pass
else:
arr = np.array(table[row][table.index[table['state'] == "measuring decode"]])
setattr(self, "decode_row_" + row, arr)
self.decode_time = self.decode_row_time[-1]
self.decode_cpu_time = self.decode_row_cpu_time_user[-1] + self.decode_row_cpu_time_system[-1]
class VideoFile:
def __init__(self, videofilepath):
self.videofilepath = videofilepath
self.basename = os.path.basename(videofilepath)
self.path_without_ext = os.path.splitext(videofilepath)[0]
if os.path.exists(self.path_without_ext + ".266"):
self.videofilepath = self.path_without_ext + ".266"
self.useage_log_path = self.path_without_ext + useage_log_suffix
if not os.path.isfile(self.useage_log_path):
print(f"File not found: {self.useage_log_path}")
self.useage_log_path = None
self.psnr_log_path = self.path_without_ext + psnr_log_suffix
if not os.path.isfile(self.psnr_log_path):
print(f"File not found: {self.psnr_log_path}")
self.psnr_log_path = None
self.ssim_log_path = self.path_without_ext + ssim_log_suffix
if not os.path.isfile(self.ssim_log_path):
print(f"File not found: {self.ssim_log_path}")
self.ssim_log_path = None
self.vmaf_log_path = self.path_without_ext + vmaf_log_suffix
if not os.path.isfile(self.vmaf_log_path):
print(f"File not found: {self.vmaf_log_path}")
self.vmaf_log_path = None
self.topserie = os.path.split(os.path.split(self.videofilepath)[0])[1]
# eg. /path/to/video/av1/cpu-used_4/ShakeNDry/ShakeNDry-crf10.mkv -> ShakeNDry
for c in codecs:
if c in videofilepath:
self.codec = c
s = os.path.split(os.path.split(self.videofilepath)[0])[0]
# eg. /path/to/video/av1/cpu-used_4/ShakeNDry -> /path/to/video/av1/cpu-used_4
self.serie = s[s.index(self.codec)+len(self.codec)+1:].replace("/", "-") + "-" + self.topserie
self.label = s[s.index(self.codec)+len(self.codec)+1:].replace("/", "-")
self.codec_serie = self.codec + "-" + self.serie
def load_log(self):
with cf.ThreadPoolExecutor() as executor:
if self.psnr_log_path is not None:
psnr = executor.submit(PSNR_values, self.psnr_log_path)
if self.ssim_log_path is not None:
ssim = executor.submit(SSIM_values, self.ssim_log_path)
if self.vmaf_log_path is not None:
vmaf = executor.submit(VMAF_values, self.vmaf_log_path)
if self.useage_log_path is not None:
useage = executor.submit(Useage_values, self.useage_log_path)
if self.psnr_log_path is not None:
self.psnr = psnr.result()
if self.ssim_log_path is not None:
self.ssim = ssim.result()
if self.vmaf_log_path is not None:
self.vmaf = vmaf.result()
if self.useage_log_path is not None:
                self.useage = useage.result()
in simulation:', sum_quantity, quantity, buy_rate])
sum_quantity = quantity
avg_price = buy_rate
# Real mode
else:
# Double-checking the quantity after we calculated the actual rate
if quantity > 0.0:
# Bitmex is a bit special (market making)
if robot.exchange == 'bitmex':
# Open a long or a short depending on the requested side if the number of contracts (calculated) > 0
if contracts > 0: # we can have a situation with non-zero quantity but zero contracts
# Process the order
if robot.short_flag:
robot.logger.lprint(['Contracts (short) {} buy_rate {}'.format(contracts, buy_rate)]) # DEBUG
buy_result = e_api.selllimit(robot.exchange, robot.market, None, buy_rate, contracts,
postonly=current_postonly_flag)
else:
robot.logger.lprint(['Contracts (long) {} buy_rate {}'.format(contracts, buy_rate)]) # DEBUG
buy_result = e_api.buylimit(robot.exchange, robot.market, None, buy_rate, contracts,
postonly=current_postonly_flag)
else: # if zero contracts to buy are left - finish buying
buy_flag = False
robot.sleep_buy_timer = 0
robot.logger.lprint(['Finished buying - zero contracts left'])
else: # other exchanges
if robot.short_flag:
robot.logger.lprint(['Quantity (short) {} buy_rate {}'.format(quantity, buy_rate)]) # DEBUG
buy_result = e_api.selllimit(robot.exchange, robot.market, quantity, buy_rate, postonly=current_postonly_flag)
else:
robot.logger.lprint(['Quantity (long) {} buy_rate {}'.format(quantity, buy_rate)]) # DEBUG
buy_result = e_api.buylimit(robot.exchange, robot.market, quantity, buy_rate, postonly=current_postonly_flag)
# Process buy results
# print "\n>>> Result", buy_result #DEBUG
robot.logger.lprint(["------\nResult:", buy_result, "\n------"]) # DEBUG #
if buy_result == 'MIN_TRADE_REQUIREMENT_NOT_MET':
                    # If trade requirements were not met or an error occurred
buy_flag = False
robot.sleep_buy_timer = 0
send_chat_message(robot.user_id,
'Cancelling buying on ' + robot.market + ' as minimum trade requirements were not met')
elif buy_result == 'issue_unknown':
                    robot.logger.lprint(['An issue occurred while submitting the order. Trying again.'])
elif buy_result == 'access_denied':
robot.logger.lprint(["Your API keys do not have proper access"])
send_chat_message(robot.user_id, 'Your API keys do not have proper access')
buy_flag = False
robot.sleep_buy_timer = 0
robot.wf_id = None
else: # If the results are ok
try:
if buy_result is not None:
buy_uuid = buy_result['uuid']
robot.logger.lprint(['>> Placed order', buy_uuid])
else: # sometimes the last result is None but previous orders were actually fine
err_msg = "{} user had None is buy order - follow up".format(robot.user_id)
issue_notify(robot, err_msg, only_admin=True)
buy_flag = False
robot.sleep_buy_timer = 0
robot.wf_id = None
except:
# If something else is wrong
buy_flag = False
robot.sleep_buy_timer = 0
err_msg = traceback.format_exc()
issue_notify(robot, err_msg)
# Also cancelling workflow until we figure out what's up
robot.wf_id = None
print('sleeping...')
b_test.sleep(robot.sleep_buy_timer)
else: # if quantity is zero
buy_flag = False
robot.sleep_buy_timer = 0
return buy_uuid, buy_flag, sum_quantity, quantity, avg_price
### Init: post-orders work. Calculating averages and updating the information / logging the results
def init_post_results(robot, sum_quantity, sum_paid, source_filled, avg_price):
if sum_quantity > 0:
if not robot.simulation:
if robot.exchange == 'bitmex':
if robot.market in config.primary_calc_markets: # robot.market == 'btc/usd':
avg_price = round(Decimal(sum_quantity) / Decimal(str(sum_paid)), 8) # cause we are buying contracts there
else:
avg_price = round(Decimal(sum_paid) / Decimal(str(sum_quantity)), 8) # cause we are buying contracts there
else: # for oanda
avg_price = round(Decimal(abs(sum_paid)) / Decimal(str(sum_quantity)), 8)
robot.logger.lprint(['Average price paid:', avg_price])
else:
# If simulation
sum_paid = robot.source_position
# Fix for the backtesting - we will just use ticker as an average price paid
if config.backtesting_enabled:
avg_price = robot.price
# Take absolute values if avg_price and spent as oanda returns negatives
avg_price = abs(avg_price)
source_filled = abs(source_filled)
# Description
if robot.short_flag:
direction_desc = 'short'
robot.entry_direction = 'red'
else:
direction_desc = 'long'
robot.entry_direction = 'green'
# Round the avg price for comms
avg_price_comm = round(float(avg_price), 2)
if robot.exchange == 'bitmex':
comm_string = "{}: orders completed on {}, opened a position for {} contracts. \n" \
"Direction: {}. \nAverage price: {}".format(
robot.market, robot.exchange, sum_quantity, direction_desc, avg_price_comm)
else:
comm_string = "{}: orders completed on {} at the average price {}.\n" \
"Position of {} unit(s).".format(robot.market, robot.exchange, avg_price_comm, sum_quantity)
send_chat_message(robot.user_id, comm_string)
robot.logger.lprint([comm_string])
# Updating twitter
if (robot.lambobot is not None) and not robot.simulation:
comm_string_twitter = "{}: opened a {} position. #algotrading".format(
robot.market.upper(), direction_desc, robot.core_strategy)
try: # just in case if this returns an error
robot.lambobot.post(comm_string_twitter)
except:
pass
# Updating workflow info if we have a workflow
if robot.wf_id is not None:
sql_string = "UPDATE workflow SET sum_q = '{}', avg_price = '{}' " \
"WHERE wf_id = {} AND userid = {} AND core_strategy = '{}' ".format(
sum_quantity, avg_price, robot.wf_id, robot.user_id, robot.core_strategy)
robot.job_id, rows = sql.query_lastrow_id(sql_string)
else: # if we have zero quantity as a result
send_chat_message(robot.user_id,
'{} ({}): buy order was cancelled, no positions initiated'.format(robot.exchange, robot.market))
robot.logger.lprint([robot.market, ': buy order was cancelled, no positions initiated'])
if robot.wf_id is not None:
sql_string = "DELETE FROM workflow WHERE wf_id = {} " \
"AND userid = {} AND core_strategy = '{}' ".format(
robot.wf_id, robot.user_id, robot.core_strategy)
sql.query(sql_string)
robot.wf_id = None
robot.terminate()
return sum_quantity, sum_paid, source_filled, avg_price
### Init: check workflow runs
def init_check_wf(robot, sql):
if robot.wf_id is not None:
sql_string = "SELECT * FROM workflow WHERE wf_id = '{}' " \
"AND userid = {} AND core_strategy = '{}' LIMIT 1".format(
robot.wf_id, robot.user_id, robot.core_strategy)
rows = sql.query(sql_string)
# Checks to handle duplicated jobs situation
try:
wf_info = rows[0]
except:
sql_string = "SELECT * FROM workflow WHERE market = '{}' " \
"AND userid = {} AND core_strategy = '{}' LIMIT 1".format(
robot.market.upper(), robot.user_id, robot.core_strategy)
rows = sql.query(sql_string)
try:
wf_info = rows[0]
except:
wf_info = None
chat_error_msg = 'Cannot launch the trading job for the market {}. ' \
'Please launch it using new command.'.format(robot.market)
send_chat_message(robot.user_id, chat_error_msg)
# Launching a job if required by workflow
if wf_info is not None:
wf_info = rows[0] # first result if existing
wf_info_market = wf_info[1]
wf_info_price = wf_info[8]
wf_stop_mode = wf_info[9]
wf_price_entry = wf_info[10]
robot.exchange_abbr = wf_info[11]
# Using actual average price as an entry: avg_price
if (wf_info_price is None) and (wf_price_entry is not None):
wf_info_price = wf_price_entry
# Deleting wf_id from the db
sql_string = "DELETE FROM workflow WHERE wf_id = {} " \
"AND userid = {} AND core_strategy = '{}' ".format(
robot.wf_id, robot.user_id, robot.core_strategy)
sql.query(sql_string)
launch_str = "params: process, {}, {}, {}, {}".format(wf_stop_mode, robot.exchange_abbr, wf_info_market, str(wf_info_price)) #DEBUG
robot.logger.lprint([launch_str])
if not b_test.backtesting:
robot.input('_', 'process', wf_stop_mode, robot.exchange_abbr, wf_info_market, str(wf_info_price))
else: # limitation of sell amount needed
robot.input('_', 'process', wf_stop_mode, robot.exchange_abbr, wf_info_market, str(wf_info_price), str(robot.source_position/robot.margin_level))
robot.run_program_mode = 'process'
else: # if there is no workflow task - stop
robot.run_continued = False
### USD - local curr rates update
def usd_rate_value(robot, e_api):
if robot.exchange == 'oanda':
if not robot.simulation:
usd_x_rate = e_api.getticker('oanda', 'AUD_USD')
else:
usd_x_rate = 1 # does not matter
# If market is closed
if usd_x_rate is None:
# a workaround to get the last known
usd_x_rate = robot.usd_x_rate_last
else:
robot.usd_x_rate_last = usd_x_rate
return usd_x_rate
else:
return None
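# Hedged illustration, not part of the bot: usd_rate_value only needs an object
# with .exchange, .simulation and .usd_x_rate_last plus an API object exposing
# getticker(); both stand-ins below are invented for the example.
def _example_usd_rate_value():
    class _FakeRobot:
        exchange = 'oanda'
        simulation = False
        usd_x_rate_last = 0.75
    class _FakeApi:
        @staticmethod
        def getticker(exchange, market):
            return None  # market closed -> fall back to the last known rate
    return usd_rate_value(_FakeRobot(), _FakeApi())  # returns 0.75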
### Main workflow: initiate a position
def buyer(robot, b_test):
# Restoring the timers etc, incl. postonly attempts
robot.assign_default_values()
# Update the prediction for buyer except for the case when it is called by reentry
if robot.entry_direction is None:
stop_reconfigure(robot, 'now', b_test = b_test)
else:
robot.logger.lprint(["Initiated from previous job. Direction {}, prediction {}, probability {}".format(
robot.entry_direction, robot.prediction, robot.prediction_probability)])
# Timestamp for initiation start #added_precision
robot.timestamp_start_initiate = b_test.time()
### Set up the margin (needed on on bitmex)
init_set_margin(robot, e_api)
### Price data analysis
robot.time_hour = b_test.strftime("%H")
robot.time_hour_comms = robot.time_hour
# Sleeping for a bit so that information on workflows is updated in the database just in case
b_test.sleep(int(30/robot.speedrun))
init_full_cycle(robot, sql) # full cycle mode handling
init_get_workflow(robot, sql) # check if this is a part of workflow (meaning that a job should be then launched)
### 1. Checking availability, balance
init_pre_check(robot, coinigy, b_test)
### 2. Start timer for price switching and handling simulation modes
robot.timer_init_start = b_test.time()
# Modes check
init_mode_check(robot)
# Default values and starting balances / contracts
robot.source_position = Decimal(str(robot.source_position))
# Checking balance
balance_check = ensure_balance(robot) # added to modify the quantity on the start
if balance_check is None:
robot.logger.lprint(["Invalid API keys"])
send_chat_message(robot.user_id, 'Invalid API keys - cannot check the balance')
robot.terminate()
robot.source_start = robot.source_position
initiate_position_launch = False # no launching just yet
# Default variables
robot.contracts_total = 0
buy_uuid = None
buy_flag = True
    sum_paid, sum_quantity, source_filled, contracts, avg_price, buy_rate = 0, 0, 0, 0, 0, 0
# Copyright 2021 Canonical Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Collection of Events for a test run.
Zaza is capable of running multiple bundles / test_runs in a single instance.
The Collection classes are to deal with:
* Events from the test functions that indicates stages of the test.
* Events from the Juju units (specifically scanning the logs) for hook events
and errors.
* Events from other compatible logging entities that need to be combined into
a coherent log for the collection.
There are several common fields used in the logging framework:
* collection - the name of the test (but could be anything)
* event - the thing that occurred.
Any other fields can be used as needed. Additionally, tags can be used which
result in a field of the form name=thing,name1=thing2.
At the highest level of abstraction, the objects in the collection need to be
able to 'give-up' their log files, so that they can be combined and stored as
a collection along with a manifest.
"""
import collections
from datetime import datetime
import logging
import tempfile
import sys
from zaza.global_options import get_option
from zaza.utilities import ConfigurableMixin
from .types import LogFormats
# Hold collections; probably there will only be one.
_collections = {}
logger = logging.getLogger(__name__)
def get_collection(name=None):
"""Return a collection by name.
Collections are available globally, and typically only one will be needed
per test. If the global options 'zaza-events.collection-name' is defined,
then that collection name is used for the collection. This is so a global
collection can be used within tests if wanted. Obviously, overriding
:param:`name` will force a particular collection to be returned.
This returns a named collection (a.k.a logging) for use in a module.
    :returns: the collection named, or creates a new one of that name.
:rtype: Collection
"""
global _collections
if name is None:
name = get_option("zaza-events.collection-name", "DEFAULT")
try:
return _collections[name]
except KeyError:
pass
_collections[name] = Collection(name=name)
return _collections[name]
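# Hedged usage sketch: a test would normally fetch the shared collection, attach
# a logging manager, and finalise it at the end of the run to gather the logs.
# The collection name, description and the `manager` argument (a
# PluginManagerBase implementation) are placeholders.
def _example_collection_usage(manager):
    collection = get_collection("smoke-test")
    collection.configure(description="example run")
    collection.add_logging_manager(manager)
    # ... run the test; the manager records events to files under logs_dir ...
    collection.finalise()
    files = list(collection.log_files())  # [(name, type, filename), ...]
    collection.clean_up()
    return files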
class Collection(ConfigurableMixin):
"""Collection of Log Events.
The collection manages a set of event loggers and associated log files.
The final set of files is provided in a Manifest object.
    An event logger object needs the following methods:
configure(**kwargs) -> None
finalise(self) -> None
get_manifest(self) -> ManifestBase
clean_up(self) -> None
These are used to control the logging on the units, in the following
manner.
configure(logs_dir=...) -- indicate where to put the logs.
finalise() -- ensure that the logs are complete and
pullable.
get_manifest() -- fetch a manifest of all the log files.
clean_up() -- do any clean-up as we've finished.
If the logs_dir is not configured then a temporary log directory will be
created with tempfile.mkdtemp() -- this will stick around after the program
is completed.
"""
def __init__(self, **kwargs):
"""Initialise a collection of related log files."""
self.name = None
self.collection = None
self.description = None
self.logs_dir = None
self.log_format = None
self._event_managers = []
self.configure(**kwargs)
if self.log_format is None:
self.log_format = LogFormats.InfluxDB
def _ensure_logs_dir(self):
"""Ensure that the logs_dir is set.
Also ensure the directory exists.
"""
if self.logs_dir is not None:
return
self.logs_dir = tempfile.mkdtemp()
def add_logging_manager(self, manager):
"""Add a logging manager to the collection.
The manager implements the PluginManagerBase class which allows the
collection to configure the logs dir, collection name, log format, and
other items.
:param manager: the plugin manager that manages the thing recording
events.
:type manager: PluginManagerBase
"""
if manager in self._event_managers:
logger.debug(
"Collection: adding manager %s more than once, ignoring.",
manager)
return
self._event_managers.append(manager)
if self.log_format is None:
self.log_format = LogFormats.InfluxDB
manager.configure(collection_object=self)
self._ensure_logs_dir()
manager.configure_plugin()
def finalise(self):
"""Finalise the logging collection.
This finalises ALL of the event managers that are connected to this
collection. Depending on the plugin this may terminate / gather logs
from multiple sources; i.e. it may take a while.
"""
for manager in self._event_managers:
manager.finalise()
def log_files(self):
"""Return iterator of (name, type, filename).
This is from all of the managers.
:returns: A list/iterator of tuples of (name, type, filename)
:rtype: Iterator[Tuple[str, str, str]]
"""
for manager in self._event_managers:
yield from manager.log_files()
def events(self, sort=True, precision="us", strip_precision=True):
"""Provide a context manager that returns an iterator of events.
Designed to be used as:
with collection.events() as events:
for event in events:
# do something with the (filename, event)
If sort is True, then the events are sorted, otherwise they are just
returned in the order of files from :method:`log_files`.
Note that all the log files should be the same format. If, not, the
collection is broken and an error will probably occur.
This uses the log_file() iterator on this class to produce a complete
list of log files; they all must provide the same log format.
The precision is used to normalise the precision across all of the
events to the same precision and then strip the precision indicator
ready for upload to InfluxDB.
:param sort: if True, then a sorted stream is returned.
:type sort: bool
:param precision: the precision to use; (default ms)
:type precision: str
        :param strip_precision: If True, do not re-add the precision at the end.
:type strip_precision: bool
:returns: context manager
:rtype: Iterator[(str, str)]
        :raises AssertionError: if the log files are not all the same type.
"""
specs = list(self.log_files())
if not specs:
return Streamer([], self.log_format)
type_ = specs[0][1]
if not all(t[1] == type_ for t in specs[1:]):
raise AssertionError("Not all specs match {}".format(type_))
files = [s[2] for s in specs]
return Streamer(files, self.log_format, sort=sort,
precision=precision, strip_precision=strip_precision)
def clean_up(self):
"""Tell all the managed plugins to clean-up."""
for manager in self._event_managers:
manager.clean_up()
def reset(self):
"""Reset the collection so that it can be used again.
This calls reset on all the contained managers, before removing them.
It then resets the configuration so that it can be reset.
"""
for manager in self._event_managers:
manager.reset()
self.collection = None
self.description = None
self.logs_dir = None
self.log_format = None
for manager in self._event_managers:
manager.collection_object = None
self._event_managers = []
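# Hedged sketch of consuming the combined event stream, following the pattern
# documented in Collection.events() above; `collection` is any Collection with
# managers attached, and each item is a (filename, event) pair.
def _example_iterate_events(collection):
    with collection.events(sort=True, precision="us") as events:
        for filename, event in events:
            print(filename, event)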
class Streamer:
"""An context manager for with that streams from multiple log files."""
def __init__(self, files, log_format, sort=True, precision="us",
strip_precision=True):
"""Initialise the object.
        precision must be one of ("s", "ms", "us", "ns")
:param files: a list of files
:type files: List[str]
:param log_format: one of CSV, LOG, InfluxDB
:type log_format: str
:param sort: whether to sort the logs by date order.
:type sort: bool
:param precision: the precision to use; (default ms)
:type precision: str
        :param strip_precision: If True, do not re-add the precision at the end.
:type strip_precision: bool
:returns: Iterator[(str, str)]
:raises: AssertionError if precision is not valid.
"""
self.files = files
self.log_format = log_format
self.sort = sort
self.handles = None
self.precision = precision
self.strip_precision = strip_precision
assert precision in ("s", "ms", "us", "ns")
def __enter__(self):
"""Set it up."""
# open the files to handles
handles = collections.OrderedDict()
for f in self.files:
try:
handles[f] = open(f)
except (FileNotFoundError, OSError) as e:
logger.warning("Couldn't open log file: %s: %s", f, str(e))
self.handles = handles
return self._iterator()
def __exit__(self, _, __, ___):
"""Exit, just ensure that all the handles are closed."""
for f, h in self.handles.items():
try:
h.close()
except Exception as e:
logger.warning("Exception on closing %s: %s", f, str(e))
return False
def _iterator(self):
# Whilst we still have open files.
currents = []
# Get the first set of currents [(timestamp, filename, event)]
for f, h in self.handles.copy().items():
try:
line = h.readline()
if line:
event = line.rstrip()
currents.append(
(_parse_date(self.log_format, event), f, event))
else:
self.handles[f].close()
del self.handles[f]
except OSError as e:
logger.warning("Couldn't read log file: %s: %s", f, str(e))
self.handles[f].close()
del self.handles[f]
if self.sort:
currents.sort(key=lambda i: i[0])
        # Now whilst