from congregation.lang import *
from congregation.dag import Dag
from congregation.dag.nodes.internal import *
from congregation.comp import compile_dag
from tests.utils import create_cols, compare_to_expected
import pytest
"""
Tests for correct propagation of the following relation-level
and column-level attributes after the Pushdown, PushUp, InsertCloseOps,
InsertOpenOps, InsertReadOps, and InsertStoreOps phases of the compiler
have been run:
- DAG node order
- node.requires_mpc() attribute
- relation-level stored_with sets
- column-level plaintext sets
- column-level trust_with sets
"""
@pytest.mark.parametrize("party_data, expected", [
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2},
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
}
],
{
"node_order": [Create, AggregateMean, Open, Read, Divide, Collect],
"requires_mpc": [True, True, True, False, False],
"ownership_data":[
{
"stored_with": [{1, 2}],
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"stored_with": [{1, 2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
}
]
}
)
])
def test_agg_mean(party_data, expected):
cols_in_one = create_cols(party_data[0])
rel_one = create("in1", cols_in_one, party_data[0]["stored_with"])
agg = aggregate(rel_one, "agg", party_data[0]["col_names"][:1], party_data[0]["col_names"][1], "mean")
div = divide(agg, "div", party_data[0]["col_names"][1], [10])
collect(div, {1, 2})
d = Dag({rel_one})
compile_dag(d)
compare_to_expected(d, expected)
@pytest.mark.parametrize("party_data, expected", [
(
[
{
"col_names": ["a", "b"],
"stored_with": {1},
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1}, {1}]
},
{
"col_names": ["c", "d"],
"stored_with": {2},
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
}
],
{
"node_order": [
Create,
Create,
AggregateSumSquaresAndCount,
Store,
Close,
AggregateSumSquaresAndCount,
Store,
Close,
Concat,
AggregateStdDev,
Open,
Read,
AggregateStdDevLocalSqrt,
Multiply,
Collect
],
"requires_mpc": [
False, False, False,
False, True, False,
False, True, True,
True, True, False,
False, False
],
"ownership_data":[
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1}, {1}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [set(), set(), set(), set()],
"trust_with_sets": [set(), set(), set(), set()]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1},
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1, 2}, {1}]
},
{
"col_names": ["c", "d"],
"stored_with": {2},
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
}
],
{
"node_order": [
Create,
Create,
AggregateSumSquaresAndCount,
Store,
Close,
AggregateSumSquaresAndCount,
Store,
Close,
Concat,
AggregateStdDev,
Open,
Read,
AggregateStdDevLocalSqrt,
Multiply,
Collect
],
"requires_mpc": [
False, False, False,
False, True, False,
False, True, True,
True, True, False,
False, False
],
"ownership_data":[
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1, 2}, {1}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1, 2}, {1}, {1}, {1, 2}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1, 2}, {1}, {1}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1, 2}, {1}, {1}, {1, 2}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [set(), set(), set(), set()],
"trust_with_sets": [{2}, set(), set(), {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
}
]
}
),
(
[
{
"col_names": ["a", "b"],
"stored_with": {1, 2},
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"col_names": ["c", "d"],
"stored_with": {1, 2},
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
}
],
{
"node_order": [
Create,
Create,
Concat,
AggregateStdDev,
Open,
Read,
AggregateStdDevLocalSqrt,
Multiply,
Collect
],
"requires_mpc": [True, True, True, True, True, False, False, False, False],
"ownership_data":[
{
"stored_with": [{1, 2}],
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"stored_with": [{1, 2}],
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"stored_with": [{1, 2}],
"plaintext_sets": [set(), set()],
"trust_with_sets": [set(), set()]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}]
}
]
}
)
])
def test_agg_std_dev(party_data, expected):
cols_in_one = create_cols(party_data[0])
cols_in_two = create_cols(party_data[1])
rel_one = create("in1", cols_in_one, party_data[0]["stored_with"])
rel_two = create("in2", cols_in_two, party_data[1]["stored_with"])
cc = concat([rel_one, rel_two], "concat", party_data[0]["col_names"])
std_dev = aggregate(cc, "std_dev", [party_data[0]["col_names"][0]], party_data[0]["col_names"][1], "std_dev")
mult = multiply(std_dev, "mult", party_data[0]["col_names"][0], [party_data[0]["col_names"][1], 7])
collect(mult, {1, 2})
d = Dag({rel_one, rel_two})
compile_dag(d)
compare_to_expected(d, expected)
@pytest.mark.parametrize("party_data, expected", [
(
[
{
"col_names": ["a", "b"],
"stored_with": {1},
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1}, {1}]
},
{
"col_names": ["c", "d"],
"stored_with": {2},
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
}
],
{
"node_order": [
Create,
Create,
AggregateSumSquaresAndCount,
Store,
Close,
AggregateSumSquaresAndCount,
Store,
Close,
Concat,
AggregateVariance,
Open,
Read,
AggregateVarianceLocalDiff,
Multiply,
Collect
],
"requires_mpc": [
False, False, False,
False, True, False,
False, True, True,
True, True, False,
False, False
],
"ownership_data":[
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}],
"trust_with_sets": [{1}, {1}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}],
"trust_with_sets": [{2}, {2}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{1}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1}, {1}, {1}, {1}],
"trust_with_sets": [{1}, {1}, {1}, {1}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{2}, {2}, {2}, {2}],
"trust_with_sets": [{2}, {2}, {2}, {2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [set(), set(), set(), set()],
"trust_with_sets": [set(), set(), set(), set()]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
"stored_with": [{1}, {2}],
"plaintext_sets": [{1, 2}, {1, 2}, {1, 2}],
"trust_with_sets": [{1, 2}, {1, 2}, {1, 2}]
},
{
from itertools import count
import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import imageio
import math
from .plot_ss import *
''' Return the number of lines in a file (0 if the file is empty) '''
def count_line(file_path):
    line_count = 0
    with open(file_path) as f:
        for line_count, _ in enumerate(f, 1):
            pass
    return line_count
''' Read a csv file and return all of its rows as a list of lists '''
def read_file(path):
    rows = []
    with open(path, mode='r') as f:
        reader = csv.reader(f)
        for row in reader:
            rows.append(row)
    return rows
''' Return the idx-th line of a csv file (line numbering starts at 1) '''
def read_line(path, idx):
    with open(path, mode='r') as f:
        reader = csv.reader(f)
        # Skip the idx - 1 lines before the requested one
        for _ in range(idx - 1):
            f.readline()
        return next(reader)
''' Return the content of the last line of the file '''
def read_last_line(file_path):
    line_count = count_line(file_path)
    return read_line(file_path, line_count)
''' Parse a logged agent string into a color tag and a position np.array '''
def str_to_color_nparray(s):
    color = s[0]
    np_str = s[3:-1].split()
    values = [float(v) for v in np_str]
    return color, np.array(values)
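# Example (assumed log format, inferred from the slicing above): a cell such as
# "b [1.5 2.0]" would yield ("b", np.array([1.5, 2.0])) -- the first character
# is the matplotlib color tag and the bracketed numbers are the coordinates.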
''' Bar plot with array and corresponding nums '''
def bar_plot(array, nums):
fig, ax = plt.subplots()
ax.bar(nums, array)
    # ax.set_xticks(nums)
return fig, ax
''' Load the integrated result list for each shepherd number, covering all sheep-number combinations and averaged over multiple trials '''
def get_full_list(directory_path, shepherd_nums):
    results = []
    # Shepherd number starts from 1; each shepherd count has its own integrated csv
    for i in shepherd_nums:
        file_path = directory_path + "/data/" + str(i) + ".csv"
        results.append(read_file(file_path))
    return results
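# Note: cal_success() and integrate_trial() below assume that the fourth column
# (index 3) of each logged row is the per-trial success flag ('True'/'False')
# and that the last two columns are the step count and the travelled distance;
# this is inferred from the indexing in those functions and from the result.csv
# header written in write_csv().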
def cal_success(rows):
    length = len(rows)
    succ_length = length
    succ_list = []
    for line in rows:
        if line[3] == 'False':
            succ_length -= 1
        else:
            succ_list.append(line)
    succ_rate = succ_length / length
    return succ_rate, succ_list
'''
Integrate trials that share the same shepherd number and sheep number:
keep the configuration columns, compute the success rate, and average the
step count and travelled distance (with variances) over the succeeded trials
'''
def integrate_trial(rows):
    res = [None] * 8
    res[0] = rows[0][0]
    res[1] = rows[0][1]
    res[2] = rows[0][2]
    succ_rate, succ_list = cal_success(rows)
    # Replace the per-trial success boolean flag with an aggregate success rate
    res[3] = succ_rate
    if len(succ_list) > 0:
        # Step count: mean and variance over the succeeded trials
        res[4] = sum(int(line[-2]) for line in succ_list) / len(succ_list)
        res[5] = np.var([int(line[-2]) for line in succ_list])
        # Distance: mean and variance over the succeeded trials
        res[6] = sum(int(line[-1]) for line in succ_list) / len(succ_list)
        res[7] = np.var([int(line[-1]) for line in succ_list])
    else:
        res[4] = 0
        res[5] = 0
        res[6] = 0
        res[7] = 0
    res[4] = str(math.ceil(res[4]))
    res[5] = str(math.ceil(res[5]))
    res[6] = str(math.ceil(res[6]))
    res[7] = str(math.ceil(res[7]))
    return res
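# The returned row therefore lines up with the result.csv header written in
# write_csv(): [shepherd, sheep, method, rate, step, var_step, distance, var_dis].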
''' Summarize every trial from the last line of its per-trial csv log and append one integrated row for this (shepherd, sheep) configuration '''
def full_csv(directory_path, shepherd_num, sheep_num, trials):
    with open("{}/data/{}.csv".format(directory_path, shepherd_num), mode='a') as f:
        rows = []
        for trial in trials:
            file_path = "{}/data/{}sh{}tr{}.csv".format(directory_path, shepherd_num, sheep_num, trial)
            line = read_last_line(file_path)
            rows.append(line)
        int_list = integrate_trial(rows)
        writer = csv.writer(f)
        writer.writerow(int_list)
# Draw graphs below
''' Output all result data into one result.csv '''
def write_csv(directory_path, file_path, shepherd_nums):
    with open(file_path, mode='a') as f:
        rows = []
        # Add the csv header
        rows.append(['shepherd', 'sheep', 'method', 'rate', 'step', 'var_step', 'distance', 'var_dis'])
        for shepherd_num in shepherd_nums:
            data_path = "{}/data/{}.csv".format(directory_path, shepherd_num)
            line = read_last_line(data_path)
            rows.append(line)
        writer = csv.writer(f)
        writer.writerows(rows)
''' Draw average success rate for shepherds and sheep in trials '''
def success_rate_plot(directory_path, shepherd_nums, sheep_nums, full_list):
graph_path = directory_path + '/graph/rate'
os.makedirs(graph_path)
base = shepherd_nums[0]
for i in shepherd_nums:
list = full_list[i - base]
        arr = np.reshape(np.take(np.asarray(list), [3], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"sheep number": sheep_nums, "success rate": arr})
ax = sns.barplot(x="sheep number", y="success rate", palette="Blues_r", data=df)
ax.set_title('average success rate when shepherd number is ' + str(i))
fig_path_sh = "{}/shd{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
base = sheep_nums[0]
for i in sheep_nums:
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [3], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"shepherd number": shepherd_nums, "success rate": arr})
ax = sns.barplot(x="shepherd number", y="success rate", palette="Blues_r", data=df)
ax.set_title('average success rate when sheep number is ' + str(i))
fig_path_sh = "{}/shp{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
return
''' Draw success time for shepherds in trials '''
def success_time_plot(directory_path, shepherd_nums, sheep_nums, full_list):
graph_path = directory_path + '/graph/succ'
os.makedirs(graph_path)
base = shepherd_nums[0]
for i in shepherd_nums:
list = full_list[i - base]
        arr = np.reshape(np.take(np.asarray(list), [-2], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"sheep number": sheep_nums, "success steps": arr})
ax = sns.barplot(x="sheep number", y="success steps", palette="Blues_r", data=df)
ax.set_title('success steps for sheep when shepherd number is ' + str(i))
fig_path_sh = "{}/shd{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
base = sheep_nums[0]
for i in sheep_nums:
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [-2], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"shepherd number": shepherd_nums, "success steps": arr})
ax = sns.barplot(x="shepherd number", y="success steps", palette="Blues_r", data=df)
ax.set_title('success steps for shepherd when sheep number is ' + str(i))
fig_path_sh = "{}/shp{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
return
''' Draw average movement distance for shepherds in trials '''
def average_distance_plot(directory_path, shepherd_nums, sheep_nums, full_list):
graph_path = directory_path + '/graph/dis'
os.makedirs(graph_path)
base = shepherd_nums[0]
for i in shepherd_nums:
list = full_list[i - base]
        arr = np.reshape(np.take(np.asarray(list), [-1], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"sheep number": sheep_nums, "average distance": arr})
ax = sns.barplot(x="sheep number", y="average distance", palette="Blues_r", data=df)
ax.set_title('average shepherding distance for sheep when shepherd number is ' + str(i))
fig_path_sh = "{}/shd{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
base = sheep_nums[0]
for i in sheep_nums:
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [-1], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"shepherd number": shepherd_nums, "average distance": arr})
ax = sns.barplot(x="shepherd number", y="average distance", palette="Blues_r", data=df)
ax.set_title('average shepherding distance for shepherd when sheep number is ' + str(i))
fig_path_sh = "{}/shp{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax.clear()
return
''' Composite plot only for shepherds '''
def composite_plot(directory_path, shepherd_nums, sheep_nums, full_list):
graph_path = directory_path + '/graph/compo'
os.makedirs(graph_path)
    base = sheep_nums[0]
for i in sheep_nums:
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(24, 8), sharex=True)
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [3], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"success rate": arr})
sns.lineplot(data=df, ax=ax1)
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [-2], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"success steps": arr})
sns.lineplot(data=df, ax=ax2)
list = []
for j in range(len(full_list)):
list.append(full_list[j][i - base])
        arr = np.reshape(np.take(np.asarray(list), [-1], axis=1), -1).astype(float).tolist()
df = pd.DataFrame({"average distance": arr})
sns.lineplot(data=df, ax=ax3)
plt.suptitle('sheep number is ' + str(i))
fig_path_sh = "{}/shp{}.png".format(graph_path, str(i))
plt.savefig(fig_path_sh)
ax1.clear()
ax2.clear()
ax3.clear()
return
''' Plot all graphs here '''
def plot_graph(directory_path, shepherd_nums, sheep_nums):
sns.set(style="white")
full_list = get_full_list(directory_path, shepherd_nums)
print(full_list)
success_rate_plot(directory_path, shepherd_nums, sheep_nums, full_list)
success_time_plot(directory_path, shepherd_nums, sheep_nums, full_list)
average_distance_plot(directory_path, shepherd_nums, sheep_nums, full_list)
composite_plot(directory_path, shepherd_nums, sheep_nums, full_list)
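# Example usage (hypothetical paths and ranges -- the expected layout of
# directory_path/data/<shepherd>.csv is assumed):
#   plot_graph("./results/exp1", list(range(1, 4)), list(range(10, 31, 10)))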
def generate_gif_csv(png_path, gif_file_path, frame):
    ''' Generate a gif from existing frame images '''
frames_path = png_path + "/{i}.png"
with imageio.get_writer(gif_file_path, mode='I', fps=10) as writer:
for i in range(frame):
writer.append_data(imageio.imread(frames_path.format(i=i)))
# def csv_trace(directory_path, shepherd_nums, sheep_nums, trial_nums, param):
# csv_one_trace(directory_path, shepherd_nums[-1], sheep_nums[-1], trial_nums, param)
''' Plot the first frame of the trial with the shortest trace '''
''' Shepherd number, sheep number and trial numbers all start from 1 '''
def first_graph_plot(directory_path, shepherd_num, sheep_num, trial_nums, param):
sns.set(style="white")
min_line = 10000
trial_num = trial_nums[0]
for i in trial_nums:
file_path = directory_path + "/data/" + "{}sh{}tr{}.csv".format(str(shepherd_num), str(sheep_num), str(i))
cnt = count_line(file_path)
# if cnt > max_line:
# max_line = cnt
# trial_num = i
if cnt < min_line:
min_line = cnt
trial_num = i
print(trial_num)
file_path = directory_path + "/data/" + "{}sh{}tr{}.csv".format(str(shepherd_num), str(sheep_num), str(trial_num))
with open(file_path) as f:
reader = csv.reader(f, delimiter=',')
# For trace
sheeps_color = []
sheeps_pos = []
shepherds_color = []
shepherds_pos = []
index = 0
# row_num = 0
log_png_path = directory_path + "/png/{}sh{}tr{}".format(str(shepherd_num), str(sheep_num), str(trial_num))
os.makedirs(log_png_path)
for row in reader:
# For trace
sheeps_color_row = []
sheeps_pos_row = []
shepherds_color_row = []
shepherds_pos_row = []
for i in range(0, sheep_num):
color, pos = str_to_color_nparray(row[i])
sheeps_color.append(color)
sheeps_pos.append(pos)
sheeps_color_row.append(color)
sheeps_pos_row.append(pos)
for i in range(sheep_num, sheep_num + shepherd_num):
color, pos = str_to_color_nparray(row[i])
shepherds_color.append(color)
shepherds_pos.append(pos)
shepherds_color_row.append(color)
shepherds_pos_row.append(pos)
fig_row, ax_row = plt.subplots(figsize=(8,8))
init_plot_line_csv_spec(ax_row, param, sheeps_color_row, sheeps_pos_row, shepherds_color_row, shepherds_pos_row)
fig_path = log_png_path + "/{}_0.pdf".format(str(index))
# print(fig_path)
fig_row.savefig(fig_path)
index += 1
ax_row.clear()
plt.clf()
plt.close()
break
f.close()
return
""" Generate a gif for shortest trace """
def csv_trace(directory_path, shepherd_num, sheep_num, trial_nums, param):
sns.set(style="white")
max_line = 0
    # Use 10000 as the default minimum line number
min_line = 10000
trial_num = trial_nums[0]
for i in trial_nums:
        file_path
(
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'subtractedCurve' is not XSDataFile but %s"
                % subtractedCurve.__class__.__name__
)
raise BaseException(strMessage)
if autoRg is None:
self._autoRg = None
elif autoRg.__class__.__name__ == "XSDataAutoRg":
self._autoRg = autoRg
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'autoRg' is not XSDataAutoRg but %s"
                % autoRg.__class__.__name__
)
raise BaseException(strMessage)
if gnom is None:
self._gnom = None
elif gnom.__class__.__name__ == "XSDataGnom":
self._gnom = gnom
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'gnom' is not XSDataGnom but %s"
                % gnom.__class__.__name__
)
raise BaseException(strMessage)
if volume is None:
self._volume = None
elif volume.__class__.__name__ == "XSDataDoubleWithUnit":
self._volume = volume
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'volume' is not XSDataDoubleWithUnit but %s"
                % volume.__class__.__name__
)
raise BaseException(strMessage)
if hplcFile is None:
self._hplcFile = None
elif hplcFile.__class__.__name__ == "XSDataFile":
self._hplcFile = hplcFile
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'hplcFile' is not XSDataFile but %s"
                % hplcFile.__class__.__name__
)
raise BaseException(strMessage)
if mergedCurves is None:
self._mergedCurves = []
elif mergedCurves.__class__.__name__ == "list":
self._mergedCurves = mergedCurves
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'mergedCurves' is not list but %s"
                % mergedCurves.__class__.__name__
)
raise BaseException(strMessage)
if hplcImage is None:
self._hplcImage = None
elif hplcImage.__class__.__name__ == "XSDataFile":
self._hplcImage = hplcImage
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'hplcImage' is not XSDataFile but %s"
                % hplcImage.__class__.__name__
)
raise BaseException(strMessage)
if summedIntensity is None:
self._summedIntensity = None
elif summedIntensity.__class__.__name__ == "XSDataDouble":
self._summedIntensity = summedIntensity
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'summedIntensity' is not XSDataDouble but %s"
                % summedIntensity.__class__.__name__
)
raise BaseException(strMessage)
if timeStamp is None:
self._timeStamp = None
elif timeStamp.__class__.__name__ == "XSDataTime":
self._timeStamp = timeStamp
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'timeStamp' is not XSDataTime but %s"
                % timeStamp.__class__.__name__
)
raise BaseException(strMessage)
if rti is None:
self._rti = None
elif rti.__class__.__name__ == "XSDataRamboTainer":
self._rti = rti
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0 constructor argument 'rti' is not XSDataRamboTainer but %s"
                % rti.__class__.__name__
)
raise BaseException(strMessage)
# Methods and properties for the 'bufferCurve' attribute
def getBufferCurve(self):
return self._bufferCurve
def setBufferCurve(self, bufferCurve):
if bufferCurve is None:
self._bufferCurve = None
elif bufferCurve.__class__.__name__ == "XSDataFile":
self._bufferCurve = bufferCurve
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setBufferCurve argument is not XSDataFile but %s"
% bufferCurve.__class__.__name__
)
raise BaseException(strMessage)
def delBufferCurve(self):
self._bufferCurve = None
bufferCurve = property(
getBufferCurve, setBufferCurve, delBufferCurve, "Property for bufferCurve"
)
# Methods and properties for the 'subtractedCurve' attribute
def getSubtractedCurve(self):
return self._subtractedCurve
def setSubtractedCurve(self, subtractedCurve):
if subtractedCurve is None:
self._subtractedCurve = None
elif subtractedCurve.__class__.__name__ == "XSDataFile":
self._subtractedCurve = subtractedCurve
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setSubtractedCurve argument is not XSDataFile but %s"
% subtractedCurve.__class__.__name__
)
raise BaseException(strMessage)
def delSubtractedCurve(self):
self._subtractedCurve = None
subtractedCurve = property(
getSubtractedCurve,
setSubtractedCurve,
delSubtractedCurve,
"Property for subtractedCurve",
)
# Methods and properties for the 'autoRg' attribute
def getAutoRg(self):
return self._autoRg
def setAutoRg(self, autoRg):
if autoRg is None:
self._autoRg = None
elif autoRg.__class__.__name__ == "XSDataAutoRg":
self._autoRg = autoRg
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setAutoRg argument is not XSDataAutoRg but %s"
% autoRg.__class__.__name__
)
raise BaseException(strMessage)
def delAutoRg(self):
self._autoRg = None
autoRg = property(getAutoRg, setAutoRg, delAutoRg, "Property for autoRg")
# Methods and properties for the 'gnom' attribute
def getGnom(self):
return self._gnom
def setGnom(self, gnom):
if gnom is None:
self._gnom = None
elif gnom.__class__.__name__ == "XSDataGnom":
self._gnom = gnom
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setGnom argument is not XSDataGnom but %s"
% gnom.__class__.__name__
)
raise BaseException(strMessage)
def delGnom(self):
self._gnom = None
gnom = property(getGnom, setGnom, delGnom, "Property for gnom")
# Methods and properties for the 'volume' attribute
def getVolume(self):
return self._volume
def setVolume(self, volume):
if volume is None:
self._volume = None
elif volume.__class__.__name__ == "XSDataDoubleWithUnit":
self._volume = volume
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setVolume argument is not XSDataDoubleWithUnit but %s"
% volume.__class__.__name__
)
raise BaseException(strMessage)
def delVolume(self):
self._volume = None
volume = property(getVolume, setVolume, delVolume, "Property for volume")
# Methods and properties for the 'hplcFile' attribute
def getHplcFile(self):
return self._hplcFile
def setHplcFile(self, hplcFile):
if hplcFile is None:
self._hplcFile = None
elif hplcFile.__class__.__name__ == "XSDataFile":
self._hplcFile = hplcFile
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setHplcFile argument is not XSDataFile but %s"
% hplcFile.__class__.__name__
)
raise BaseException(strMessage)
def delHplcFile(self):
self._hplcFile = None
hplcFile = property(getHplcFile, setHplcFile, delHplcFile, "Property for hplcFile")
# Methods and properties for the 'mergedCurves' attribute
def getMergedCurves(self):
return self._mergedCurves
def setMergedCurves(self, mergedCurves):
if mergedCurves is None:
self._mergedCurves = []
elif mergedCurves.__class__.__name__ == "list":
self._mergedCurves = mergedCurves
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setMergedCurves argument is not list but %s"
% mergedCurves.__class__.__name__
)
raise BaseException(strMessage)
def delMergedCurves(self):
self._mergedCurves = None
mergedCurves = property(
getMergedCurves, setMergedCurves, delMergedCurves, "Property for mergedCurves"
)
def addMergedCurves(self, value):
if value is None:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.addMergedCurves argument is None"
)
raise BaseException(strMessage)
elif value.__class__.__name__ == "XSDataFile":
self._mergedCurves.append(value)
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.addMergedCurves argument is not XSDataFile but %s"
% value.__class__.__name__
)
raise BaseException(strMessage)
def insertMergedCurves(self, index, value):
if index is None:
strMessage = "ERROR! XSDataResultBioSaxsHPLCv1_0.insertMergedCurves argument 'index' is None"
raise BaseException(strMessage)
if value is None:
strMessage = "ERROR! XSDataResultBioSaxsHPLCv1_0.insertMergedCurves argument 'value' is None"
raise BaseException(strMessage)
elif value.__class__.__name__ == "XSDataFile":
self._mergedCurves[index] = value
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.addMergedCurves argument is not XSDataFile but %s"
% value.__class__.__name__
)
raise BaseException(strMessage)
# Methods and properties for the 'hplcImage' attribute
def getHplcImage(self):
return self._hplcImage
def setHplcImage(self, hplcImage):
if hplcImage is None:
self._hplcImage = None
elif hplcImage.__class__.__name__ == "XSDataFile":
self._hplcImage = hplcImage
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setHplcImage argument is not XSDataFile but %s"
% hplcImage.__class__.__name__
)
raise BaseException(strMessage)
def delHplcImage(self):
self._hplcImage = None
hplcImage = property(
getHplcImage, setHplcImage, delHplcImage, "Property for hplcImage"
)
# Methods and properties for the 'summedIntensity' attribute
def getSummedIntensity(self):
return self._summedIntensity
def setSummedIntensity(self, summedIntensity):
if summedIntensity is None:
self._summedIntensity = None
elif summedIntensity.__class__.__name__ == "XSDataDouble":
self._summedIntensity = summedIntensity
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setSummedIntensity argument is not XSDataDouble but %s"
% summedIntensity.__class__.__name__
)
raise BaseException(strMessage)
def delSummedIntensity(self):
self._summedIntensity = None
summedIntensity = property(
getSummedIntensity,
setSummedIntensity,
delSummedIntensity,
"Property for summedIntensity",
)
# Methods and properties for the 'timeStamp' attribute
def getTimeStamp(self):
return self._timeStamp
def setTimeStamp(self, timeStamp):
if timeStamp is None:
self._timeStamp = None
elif timeStamp.__class__.__name__ == "XSDataTime":
self._timeStamp = timeStamp
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setTimeStamp argument is not XSDataTime but %s"
% timeStamp.__class__.__name__
)
raise BaseException(strMessage)
def delTimeStamp(self):
self._timeStamp = None
timeStamp = property(
getTimeStamp, setTimeStamp, delTimeStamp, "Property for timeStamp"
)
# Methods and properties for the 'rti' attribute
def getRti(self):
return self._rti
def setRti(self, rti):
if rti is None:
self._rti = None
elif rti.__class__.__name__ == "XSDataRamboTainer":
self._rti = rti
else:
strMessage = (
"ERROR! XSDataResultBioSaxsHPLCv1_0.setRti argument is not XSDataRamboTainer but %s"
% rti.__class__.__name__
)
raise BaseException(strMessage)
def delRti(self):
self._rti = None
rti = property(getRti, setRti, delRti, "Property for rti")
def export(self, outfile, level, name_="XSDataResultBioSaxsHPLCv1_0"):
showIndent(outfile, level)
outfile.write(unicode("<%s>\n" % name_))
self.exportChildren(outfile, level + 1, name_)
showIndent(outfile, level)
outfile.write(unicode("</%s>\n" % name_))
def exportChildren(self, outfile, level, name_="XSDataResultBioSaxsHPLCv1_0"):
XSDataResultBioSaxsProcessOneFilev1_0.exportChildren(
self, outfile, level, name_
)
if self._bufferCurve is not None:
self.bufferCurve.export(outfile, level, name_="bufferCurve")
if self._subtractedCurve is not None:
self.subtractedCurve.export(outfile, level, name_="subtractedCurve")
if self._autoRg is not None:
self.autoRg.export(outfile, level, name_="autoRg")
if self._gnom is not None:
self.gnom.export(outfile, level, name_="gnom")
if self._volume is not None:
self.volume.export(outfile, level, name_="volume")
if self._hplcFile is not None:
self.hplcFile.export(outfile, level, name_="hplcFile")
for mergedCurves_ in self.getMergedCurves():
mergedCurves_.export(outfile, level, name_="mergedCurves")
if self._hplcImage is not None:
self.hplcImage.export(outfile, level, name_="hplcImage")
if self._summedIntensity is not None:
self.summedIntensity.export(outfile, level, name_="summedIntensity")
if self._timeStamp is not None:
self.timeStamp.export(outfile, level, name_="timeStamp")
if self._rti is not None:
self.rti.export(outfile, level, name_="rti")
def build(self, node_):
for child_ in node_.childNodes:
nodeName_ = child_.nodeName.split(":")[-1]
self.buildChildren(child_, nodeName_)
def buildChildren(self, child_, nodeName_):
if child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "bufferCurve":
obj_ = XSDataFile()
obj_.build(child_)
self.setBufferCurve(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "subtractedCurve":
obj_ = XSDataFile()
obj_.build(child_)
self.setSubtractedCurve(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "autoRg":
obj_ = XSDataAutoRg()
obj_.build(child_)
self.setAutoRg(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "gnom":
obj_ = XSDataGnom()
obj_.build(child_)
self.setGnom(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "volume":
obj_ = XSDataDoubleWithUnit()
obj_.build(child_)
self.setVolume(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and nodeName_ == "hplcFile":
obj_ = XSDataFile()
"▁ille": 35192,
"▁knows": 35193,
"▁lapse": 35194,
"▁mere": 35195,
"▁perdi": 35196,
"▁pirat": 35197,
"▁procedure": 35198,
"▁reserve": 35199,
"▁sep": 35200,
"▁supply": 35201,
"▁travers": 35202,
"▁vegeta": 35203,
"▁vitam": 35204,
"▁xal": 35205,
"▁Ä": 35206,
"丘": 35207,
"奈": 35208,
"庫": 35209,
"战": 35210,
"業": 35211,
"而": 35212,
"黒": 35213,
"ETER": 35214,
"JUM": 35215,
"briga": 35216,
"bás": 35217,
"diko": 35218,
"ibig": 35219,
"ilte": 35220,
"ján": 35221,
"kiwa": 35222,
"ktan": 35223,
"kutan": 35224,
"ldu": 35225,
"liği": 35226,
"making": 35227,
"object": 35228,
"olah": 35229,
"profit": 35230,
"saky": 35231,
"slag": 35232,
"tarra": 35233,
"város": 35234,
"īr": 35235,
"ər": 35236,
"▁Berita": 35237,
"▁Herren": 35238,
"▁Kromě": 35239,
"▁Lidl": 35240,
"▁Mulai": 35241,
"▁STOR": 35242,
"▁Schnee": 35243,
"▁Situation": 35244,
"▁Sveta": 35245,
"▁dag": 35246,
"▁documents": 35247,
"▁garde": 35248,
"▁intel": 35249,
"▁jel": 35250,
"▁logis": 35251,
"▁marques": 35252,
"▁muller": 35253,
"▁pian": 35254,
"▁primary": 35255,
"▁provincial": 35256,
"▁puc": 35257,
"▁pupil": 35258,
"▁relative": 35259,
"▁smil": 35260,
"▁vice": 35261,
"▁Я": 35262,
"商": 35263,
"紫": 35264,
"護": 35265,
"陶": 35266,
"순": 35267,
"ANDE": 35268,
"MINA": 35269,
"Szolnok": 35270,
"[27]": 35271,
"aithe": 35272,
"bī": 35273,
"duci": 35274,
"honor": 35275,
"ificat": 35276,
"slim": 35277,
"zán": 35278,
"ág": 35279,
"ızı": 35280,
"υ": 35281,
"да": 35282,
"ша": 35283,
"▁Alimenta": 35284,
"▁Klassen": 35285,
"▁Laat": 35286,
"▁MOTOR": 35287,
"▁Reka": 35288,
"▁Savaş": 35289,
"▁Signore": 35290,
"▁adja": 35291,
"▁annan": 35292,
"▁cottage": 35293,
"▁disrupt": 35294,
"▁explo": 35295,
"▁failure": 35296,
"▁flora": 35297,
"▁impun": 35298,
"▁lever": 35299,
"▁moderat": 35300,
"▁occasion": 35301,
"▁plain": 35302,
"▁plans": 35303,
"▁posti": 35304,
"▁prolong": 35305,
"▁rayon": 35306,
"▁saja": 35307,
"▁salle": 35308,
"▁sting": 35309,
"▁tabac": 35310,
"▁trib": 35311,
"▁Д": 35312,
"中國": 35313,
"劍": 35314,
"号": 35315,
"彝": 35316,
"条": 35317,
"比": 35318,
"满": 35319,
"第": 35320,
"辛": 35321,
"ULI": 35322,
"UTU": 35323,
"WIE": 35324,
"caj": 35325,
"intu": 35326,
"ivel": 35327,
"kammer": 35328,
"kanal": 35329,
"lizar": 35330,
"ologic": 35331,
"ové": 35332,
"pista": 35333,
"utas": 35334,
"ář": 35335,
"äll": 35336,
"ňov": 35337,
"▁Biblioteca": 35338,
"▁Brø": 35339,
"▁Cinta": 35340,
"▁Hajdú": 35341,
"▁Konu": 35342,
"▁Loka": 35343,
"▁Magas": 35344,
"▁Miet": 35345,
"▁Salud": 35346,
"▁amplia": 35347,
"▁bem": 35348,
"▁beni": 35349,
"▁bere": 35350,
"▁climb": 35351,
"▁cuento": 35352,
"▁curso": 35353,
"▁duodecim": 35354,
"▁entertainment": 35355,
"▁environmental": 35356,
"▁fault": 35357,
"▁fixed": 35358,
"▁ideal": 35359,
"▁kamer": 35360,
"▁leaders": 35361,
"▁mez": 35362,
"▁moka": 35363,
"▁rebel": 35364,
"▁sleeping": 35365,
"▁soci": 35366,
"▁verb": 35367,
"▁vient": 35368,
"傅": 35369,
"先生": 35370,
"曾": 35371,
"济": 35372,
"目": 35373,
"精": 35374,
"維": 35375,
"羊": 35376,
"豐": 35377,
"AGU": 35378,
"[29]": 35379,
"aliya": 35380,
"automat": 35381,
"baptis": 35382,
"cional": 35383,
"clé": 35384,
"czę": 35385,
"dhani": 35386,
"gesetz": 35387,
"irane": 35388,
"itev": 35389,
"jav": 35390,
"luse": 35391,
"lám": 35392,
"machen": 35393,
"messen": 35394,
"nili": 35395,
"ovom": 35396,
"prawi": 35397,
"saker": 35398,
"shuv": 35399,
"training": 35400,
"vaa": 35401,
"zava": 35402,
"āk": 35403,
"ıldı": 35404,
"ной": 35405,
"“,": 35406,
"▁Agirre": 35407,
"▁Antal": 35408,
"▁BDP": 35409,
"▁Financi": 35410,
"▁Fuerza": 35411,
"▁Kanad": 35412,
"▁Kein": 35413,
"▁Mutlu": 35414,
"▁Muu": 35415,
"▁Ravn": 35416,
"▁Supremo": 35417,
"▁Søren": 35418,
"▁Sür": 35419,
"▁União": 35420,
"▁blot": 35421,
"▁bread": 35422,
"▁cannot": 35423,
"▁chud": 35424,
"▁comfort": 35425,
"▁executive": 35426,
"▁genital": 35427,
"▁kava": 35428,
"▁kes": 35429,
"▁kira": 35430,
"▁kuru": 35431,
"▁messa": 35432,
"▁nge": 35433,
"▁plen": 35434,
"▁psy": 35435,
"▁raid": 35436,
"▁recognition": 35437,
"▁taga": 35438,
"▁towards": 35439,
"▁transversal": 35440,
"▁vegan": 35441,
"▁weekend": 35442,
"せ": 35443,
"ぶ": 35444,
"ボ": 35445,
"モン": 35446,
"宏": 35447,
"庆": 35448,
"彭": 35449,
"訓": 35450,
"雙": 35451,
";;;": 35452,
"KARI": 35453,
"abban": 35454,
"acak": 35455,
"dud": 35456,
"fada": 35457,
"monat": 35458,
"tambul": 35459,
"áns": 35460,
"æl": 35461,
"łą": 35462,
"şen": 35463,
"šen": 35464,
"ич": 35465,
"ш": 35466,
"▁Agencia": 35467,
"▁Argument": 35468,
"▁Consul": 35469,
"▁Hör": 35470,
"▁Kadın": 35471,
"▁Kamran": 35472,
"▁Kupa": 35473,
"▁Nepali": 35474,
"▁Politik": 35475,
"▁Stora": 35476,
"▁Sök": 35477,
"▁Usp": 35478,
"▁anonym": 35479,
"▁assign": 35480,
"▁attend": 35481,
"▁blackjack": 35482,
"▁blank": 35483,
"▁buru": 35484,
"▁cada": 35485,
"▁cancel": 35486,
"▁citron": 35487,
"▁develop": 35488,
"▁dj": 35489,
"▁dk": 35490,
"▁día": 35491,
"▁ended": 35492,
"▁enne": 35493,
"▁govern": 35494,
"▁hale": 35495,
"▁lange": 35496,
"▁listed": 35497,
"▁luta": 35498,
"▁partnership": 35499,
"▁sabah": 35500,
"▁sacra": 35501,
"▁scio": 35502,
"▁stran": 35503,
"▁sve": 35504,
"▁tertia": 35505,
"▁tissu": 35506,
"▁tsara": 35507,
"丰": 35508,
"乃": 35509,
"印": 35510,
"好": 35511,
"工": 35512,
"得": 35513,
"徳": 35514,
"浩": 35515,
"物": 35516,
"程": 35517,
"輝": 35518,
"静": 35519,
"首": 35520,
"魚": 35521,
"EKO": 35522,
"aani": 35523,
"application": 35524,
"asach": 35525,
"dhā": 35526,
"entrepreneur": 35527,
"gún": 35528,
"hnen": 35529,
"incl": 35530,
"information": 35531,
"integr": 35532,
"iskola": 35533,
"miste": 35534,
"mäki": 35535,
"potent": 35536,
"rodz": 35537,
"skem": 35538,
"stup": 35539,
"tuh": 35540,
"ubah": 35541,
"uš": 35542,
"vaja": 35543,
"vansa": 35544,
"zł": 35545,
"ṅg": 35546,
"▁21.00": 35547,
"▁Acum": 35548,
"▁Desde": 35549,
"▁Kirche": 35550,
"▁Kosova": 35551,
"▁Standart": 35552,
"▁Tamam": 35553,
"▁Wissenschaft": 35554,
"▁accelerat": 35555,
"▁alias": 35556,
"▁apartment": 35557,
"▁brief": 35558,
"▁brought": 35559,
"▁cod": 35560,
"▁contacto": 35561,
"▁dicks": 35562,
"▁einer": 35563,
"▁entire": 35564,
"▁eux": 35565,
"▁gott": 35566,
"▁kwe": 35567,
"▁liter": 35568,
"▁livre": 35569,
"▁mechanism": 35570,
"▁mujer": 35571,
"▁profit": 35572,
"▁psycholog": 35573,
"▁replica": 35574,
"▁reserva": 35575,
"▁sapa": 35576,
"▁skelet": 35577,
"▁sover": 35578,
"▁transmission": 35579,
"▁İstanbul": 35580,
"▁κ": 35581,
"▁Не": 35582,
"ピ": 35583,
"切": 35584,
"天皇": 35585,
"柔": 35586,
"淳": 35587,
"與": 35588,
"血": 35589,
"間": 35590,
"HOLD": 35591,
"LIST": 35592,
"[26]": 35593,
"airean": 35594,
"ceiro": 35595,
"ennus": 35596,
"histoire": 35597,
"keer": 35598,
"mentar": 35599,
"ovati": 35600,
"rakan": 35601,
"rocz": 35602,
"rę": 35603,
"shadi": 35604,
"txo": 35605,
"uur": 35606,
"ytu": 35607,
"ába": 35608,
"▁Cuir": 35609,
"▁Deste": 35610,
"▁Evet": 35611,
"▁Fler": 35612,
"▁Hå": 35613,
"▁Hög": 35614,
"▁Kaar": 35615,
"▁Kasar": 35616,
"▁Kaynak": 35617,
"▁País": 35618,
"▁Samas": 35619,
"▁Schlag": 35620,
"▁Tä": 35621,
"▁affect": 35622,
"▁alte": 35623,
"▁arro": 35624,
"▁barrio": 35625,
"▁bottle": 35626,
"▁chemical": 35627,
"▁factors": 35628,
"▁haya": 35629,
"▁illa": 35630,
"▁insert": 35631,
"▁khmer": 35632,
"▁musi": 35633,
"▁pami": 35634,
"▁pane": 35635,
"▁planeta": 35636,
"▁propra": 35637,
"▁pw": 35638,
"▁spectrum": 35639,
"▁terms": 35640,
"▁theatr": 35641,
"▁typu": 35642,
"▁uch": 35643,
"▁usually": 35644,
"▁Örebro": 35645,
"▁사랑": 35646,
"城区": 35647,
"影": 35648,
"律": 35649,
"文化": 35650,
"未": 35651,
"祿": 35652,
"謝": 35653,
"運": 35654,
"雍": 35655,
"aynay": 35656,
"branche": 35657,
"cidos": 35658,
"danie": 35659,
"dled": 35660,
"estate": 35661,
"mpah": 35662,
"ologica": 35663,
"positum": 35664,
"programm": 35665,
"raich": 35666,
"stimul": 35667,
"suche": 35668,
"suu": 35669,
"tyka": 35670,
"vlak": 35671,
"řez": 35672,
"ɨ": 35673,
"ία": 35674,
"ส": 35675,
"▁16.30": 35676,
"▁Antti": 35677,
"▁Arbeit": 35678,
"▁Boek": 35679,
"▁Engin": 35680,
"▁Evolu": 35681,
"▁Onze": 35682,
"▁Principi": 35683,
"▁Recept": 35684,
"▁Valmi": 35685,
"▁assessment": 35686,
"▁caz": 35687,
"▁conserva": 35688,
"▁damn": 35689,
"▁doit": 35690,
"▁gaur": 35691,
"▁grub": 35692,
"▁induc": 35693,
"▁lana": 35694,
"▁manufacture": 35695,
"▁melon": 35696,
"▁meteor": 35697,
"▁pati": 35698,
"▁prompt": 35699,
"▁sonora": 35700,
"▁swart": 35701,
"▁tato": 35702,
"▁tit": 35703,
"▁treti": 35704,
"▁verse": 35705,
"▁Über": 35706,
"制": 35707,
"奕": 35708,
"屋": 35709,
"斯": 35710,
"殷": 35711,
"車": 35712,
"양": 35713,
"GOR": 35714,
"LAH": 35715,
"PUL": 35716,
"VÉ": 35717,
"amore": 35718,
"américa": 35719,
"elwa": 35720,
"inig": 35721,
"jiri": 35722,
"jte": 35723,
"licita": 35724,
"mä": 35725,
"njak": 35726,
"okon": 35727,
"poko": 35728,
"renta": 35729,
"tages": 35730,
"telen": 35731,
"zê": 35732,
"ônica": 35733,
"ʁ": 35734,
"τ": 35735,
"во": 35736,
"▁Caixa": 35737,
"▁Få": 35738,
"▁Juo": 35739,
"▁Romsdal": 35740,
"▁Segui": 35741,
"▁Utara": 35742,
"▁Vasta": 35743,
"▁alert": 35744,
"▁algo": 35745,
"▁appear": 35746,
"▁arsenal": 35747,
"▁augusta": 35748,
"▁avion": 35749,
"▁certain": 35750,
"▁claim": 35751,
"▁collaboration": 35752,
"▁colors": 35753,
"▁contro": 35754,
"▁corps": 35755,
"▁cree": 35756,
"▁fals": 35757,
"▁gee": 35758,
"▁gode": 35759,
"▁gê": 35760,
"▁hoog": 35761,
"▁mend": 35762,
"▁mesa": 35763,
"▁mezzo": 35764,
"▁nicht": 35765,
"▁obez": 35766,
"▁polic": 35767,
"▁rak": 35768,
"▁stuck": 35769,
"▁tale": 35770,
"▁tender": 35771,
"▁İmam": 35772,
"▁χ": 35773,
"▁Ф": 35774,
"▁『": 35775,
"イン": 35776,
"用": 35777,
"苗": 35778,
"노": 35779,
"스": 35780,
"요": 35781,
"운": 35782,
"LEI": 35783,
"NDE": 35784,
"OTI": 35785,
"TINA": 35786,
"boks": 35787,
"eč": 35788,
"ikku": 35789,
"imento": 35790,
"kinder": 35791,
"kraut": 35792,
"musik": 35793,
"natt": 35794,
"onnan": 35795,
"qon": 35796,
"rzec": 35797,
"santa": 35798,
"taal": 35799,
"ticas": 35800,
"ður": 35801,
"▁Akershus": 35802,
"▁Buiten": 35803,
"▁Penedès": 35804,
"▁Pontifici": 35805,
"▁Subscribe": 35806,
"▁aid": 35807,
"▁arrange": 35808,
"▁augusti": 35809,
"▁coll": 35810,
"▁contribution": 35811,
"▁developed": 35812,
"▁didn": 35813,
"▁emotion": 35814,
"▁getting": 35815,
"▁gloria": 35816,
"▁graf": 35817,
"▁hija": 35818,
"▁however": 35819,
"▁lovely": 35820,
"▁merge": 35821,
"▁missing": 35822,
"▁ose": 35823,
"▁patru": 35824,
"▁pogon": 35825,
"▁rapid": 35826,
"▁residen": 35827,
"▁romantic": 35828,
"▁simila": 35829,
"▁spor": 35830,
"▁trainer": 35831,
#!/usr/bin/env python
'''
Dashboard: A graphical interface to view the status of various systems on
NaviGator and a control panel to interact with the running system.
'''
import functools
import os
from navigator_alarm import AlarmListener
from navigator_msgs.msg import Hosts, Host
from python_qt_binding import QtCore
from python_qt_binding import QtGui
from python_qt_binding import loadUi
from qt_gui.plugin import Plugin
from remote_control_lib import RemoteControl
from rosgraph_msgs.msg import Clock
import rospkg
import rospy
from std_msgs.msg import Float32, String
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2016, MIL"
__license__ = "MIT"
class Dashboard(Plugin):
def __init__(self, context):
super(Dashboard, self).__init__(context)
# Create the widget and name it
self._widget = QtGui.QWidget()
self._widget.setObjectName("Dashboard")
self.setObjectName("Dashboard")
# Extend the widget with all attributes and children in the UI file
ui_file = os.path.join(rospkg.RosPack().get_path("navigator_gui"), "resource", "dashboard.ui")
loadUi(ui_file, self._widget)
self.is_killed = False
self.remote = RemoteControl("dashboard")
self.remote.is_timed_out = True
# Creates dictionaries that are used by the monitor functions to keep track of their node or service
node_monitor_template = {
"received": "Unknown",
"stamp": rospy.Time.now(),
"cached": "Unknown",
}
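        # 'received' holds the latest value published on the monitored topic,
        # 'stamp' the time it arrived, and 'cached' the value currently shown
        # in the GUI (compared against 'received' to avoid redundant repaints).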
self.operating_mode = node_monitor_template.copy()
self.battery_voltage = node_monitor_template.copy()
self.battery_voltage["cached_warning_color"] = "red"
self.system_time = node_monitor_template.copy()
del self.system_time["stamp"]
self.system_time["timeout_count"] = 0
self.hosts = node_monitor_template.copy()
self.clear_hosts()
self.hosts["cached"] = self.hosts["received"]
self.connect_ui()
self.connect_ros()
        # Show _widget.windowTitle on the top-left of each plugin (when it is set in _widget). This is useful when you open
        # multiple plugins at once. If you open multiple instances of this plugin, these lines also append a number to the
        # window title so that the panes are easy to tell apart.
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (" (%d)" % context.serial_number()))
# Add widget to the user interface
context.add_widget(self._widget)
# Creates monitors that update data on the GUI periodically
self.monitor_operating_mode()
self.monitor_battery_voltage()
self.monitor_system_time()
self.monitor_hosts()
def clear_hosts(self):
'''
Builds a list of host dictionaries that contain the devices hostname,
an unknown IP address, and an unknown status in the hosts' receiving
variable.
'''
self.hosts["received"] = Hosts()
for hostname in self.hosts["received"].hostnames.split():
host = Host()
host.hostname = hostname
host.ip = "Unknown"
host.status = "Unknown"
self.hosts["received"].hosts.append(host)
def connect_ui(self):
'''
Links objects in the dashboard GUI to variables in the backend
dashboard object.
'''
# Kill status
self.kill_status_frame = self._widget.findChild(QtGui.QFrame, "kill_status_frame")
self.kill_status_status = self._widget.findChild(QtGui.QLabel, "kill_status_status")
# Operating mode status
self.operating_mode_frame = self._widget.findChild(QtGui.QFrame, "operating_mode_frame")
self.operating_mode_status = self._widget.findChild(QtGui.QLabel, "operating_mode_status")
# Battery voltage
self.battery_voltage_frame = self._widget.findChild(QtGui.QFrame, "battery_voltage_frame")
self.battery_voltage_status = self._widget.findChild(QtGui.QLabel, "battery_voltage_status")
# System time
self.system_time_frame = self._widget.findChild(QtGui.QFrame, "system_time_frame")
self.system_time_status = self._widget.findChild(QtGui.QLabel, "system_time_status")
# Devices table
self.device_table = self._widget.findChild(QtGui.QFrame, "device_table")
# Control panel buttons
toggle_kill_button = self._widget.findChild(QtGui.QPushButton, "toggle_kill_button")
toggle_kill_button.clicked.connect(self.remote.toggle_kill)
station_hold_button = self._widget.findChild(QtGui.QPushButton, "station_hold_button")
station_hold_button.clicked.connect(self.remote.station_hold)
rc_control_button = self._widget.findChild(QtGui.QPushButton, "rc_control_button")
rc_control_button.clicked.connect(self.remote.select_rc_control)
emergency_control_button = self._widget.findChild(QtGui.QPushButton, "emergency_control_button")
emergency_control_button.clicked.connect(self.remote.select_emergency_control)
keyboard_control_button = self._widget.findChild(QtGui.QPushButton, "keyboard_control_button")
keyboard_control_button.clicked.connect(self.remote.select_keyboard_control)
autonomous_control_button = self._widget.findChild(QtGui.QPushButton, "autonomous_control_button")
autonomous_control_button.clicked.connect(self.remote.select_autonomous_control)
# Defines the color scheme as QT style sheets
self.colors = {
"red": "QWidget {background-color:#FF432E;}",
"green": "QWidget {background-color:#B1EB00;}",
"blue": "QWidget {background-color:#4AA8DB;}",
"yellow": "QWidget {background-color:#FDEF14;}",
"orange": "QWidget {background-color:#FFA500;}"
}
def connect_ros(self):
'''
Connect ROS nodes, services, and alarms to variables and methods
within this class.
'''
# Attempts to read the battery voltage parameters (sets them to defaults if they have not been set)
self.battery_low_voltage = rospy.get_param("/battery_monitor/battery_low_voltage", 24)
self.battery_critical_voltage = rospy.get_param("/battery_monitor/battery_critical_voltage", 20)
rospy.Subscriber("/wrench/current", String, self.cache_operating_mode)
rospy.Subscriber("/battery_monitor", Float32, self.cache_battery_voltage)
rospy.Subscriber("/clock", Clock, self.cache_system_time)
rospy.Subscriber("/host_monitor", Hosts, self.cache_hosts)
self.kill_listener = AlarmListener("kill", self.update_kill_status)
def _timeout_check(function):
'''
        Simple decorator that checks whether the remote control device has
        timed out before running the wrapped method; if it has, the call is
        skipped and the wrapper returns None.
'''
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
if (not self.remote.is_timed_out):
return function(self, *args, **kwargs)
return wrapper
@_timeout_check
def update_kill_status(self, alarm):
'''
Updates the kill status display when there is an update on the kill
alarm. Caches the last displayed kill status to avoid updating the
display with the same information twice.
'''
if (alarm.clear):
if (self.is_killed):
self.is_killed = False
self.kill_status_status.setText("Alive")
self.kill_status_frame.setStyleSheet(self.colors["green"])
elif (not self.is_killed):
self.is_killed = True
self.kill_status_status.setText("Killed")
self.kill_status_frame.setStyleSheet(self.colors["red"])
def cache_operating_mode(self, msg):
'''
Stores the operating mode when it is published.
'''
self.operating_mode["received"] = msg.data
self.operating_mode["stamp"] = rospy.Time.now()
def monitor_operating_mode(self):
'''
Monitors the operating mode on a 0.5s interval. Only updates the display
when the received operating mode has changed. Will time out and display
an unknown status if it has been 15s since the last message.
'''
# Sets the operating mode to 'Unknown' if no message has been received in 15s
if ((rospy.Time.now() - self.operating_mode["stamp"]) > rospy.Duration(15)):
self.operating_mode["received"] = "Unknown"
# Updates the displayed data if a new operating mode has been received since the last timer
if (self.operating_mode["received"] != self.operating_mode["cached"]):
self.update_operating_mode_status()
# Schedules the next instance of this method with a QT timer
QtCore.QTimer.singleShot(500, self.monitor_operating_mode)
@_timeout_check
def update_operating_mode_status(self):
'''
Updates the displayed operating mode status text and color.
'''
if (self.operating_mode["received"] == "Unknown"):
self.operating_mode_status.setText("Unknown")
self.operating_mode_frame.setStyleSheet(self.colors["red"])
elif (self.operating_mode["received"] == "rc"):
self.operating_mode_status.setText("Joystick")
self.operating_mode_frame.setStyleSheet(self.colors["blue"])
elif (self.operating_mode["received"] == "emergency"):
self.operating_mode_status.setText("Emergency")
self.operating_mode_frame.setStyleSheet(self.colors["orange"])
elif (self.operating_mode["received"] == "keyboard"):
self.operating_mode_status.setText("Keyboard")
self.operating_mode_frame.setStyleSheet(self.colors["yellow"])
elif (self.operating_mode["received"] == "autonomous"):
self.operating_mode_status.setText("Autonomous")
self.operating_mode_frame.setStyleSheet(self.colors["green"])
# Set the cached operating mode to the value that was just displayed
self.operating_mode["cached"] = self.operating_mode["received"]
def cache_battery_voltage(self, msg):
'''
Stores the battery voltage when it is published.
'''
self.battery_voltage["received"] = msg.data
self.battery_voltage["stamp"] = rospy.Time.now()
def monitor_battery_voltage(self):
'''
Monitors the battery voltage on a 1s interval. Only updates the display
when the received battery voltage has changed. Will time out and
display an unknown status if it has been 15s since the last message.
'''
# Sets the battery voltage to 'Unknown' if no message has been received in 15s
if (((rospy.Time.now() - self.battery_voltage["stamp"]) > rospy.Duration(15)) or
(self.battery_voltage["received"] is None)):
self.battery_voltage["received"] = "Unknown"
# Updates the displayed data if a new battery voltage has been received since the last timer
if (self.battery_voltage["received"] != self.battery_voltage["cached"]):
self.update_battery_voltage_status()
# Schedules the next instance of this method with a QT timer
QtCore.QTimer.singleShot(1000, self.monitor_battery_voltage)
@_timeout_check
def update_battery_voltage_status(self):
'''
Updates the displayed battery voltage status text and color. Uses a
cached warning color to make sure the color is not changed on every
update.
'''
if (self.battery_voltage["received"] == "Unknown"):
self.battery_voltage_status.setText("Unknown")
self.battery_voltage_frame.setStyleSheet(self.colors["red"])
self.battery_voltage["cached_warning_color"] = "red"
else:
# Set the frame background color to red if the battery is at or below the critical voltage
if (self.battery_voltage["received"] <= self.battery_critical_voltage):
if (self.battery_voltage["cached_warning_color"] != "red"):
self.battery_voltage_frame.setStyleSheet(self.colors["red"])
self.battery_voltage["cached_warning_color"] = "red"
# Set the frame background color to yellow if the battery is at or below the low voltage
elif (self.battery_voltage["received"] <= self.battery_low_voltage):
if (self.battery_voltage["cached_warning_color"] != "yellow"):
self.battery_voltage_frame.setStyleSheet(self.colors["yellow"])
self.battery_voltage["cached_warning_color"] = "yellow"
# Set the frame background color to green if the battery is above the warning voltages
elif (self.battery_voltage["cached_warning_color"] != "green"):
self.battery_voltage_frame.setStyleSheet(self.colors["green"])
self.battery_voltage["cached_warning_color"] = "green"
self.battery_voltage_status.setText(str(self.battery_voltage["received"])[:5])
# Set the cached battery voltage to the value that was just displayed
self.battery_voltage["cached"] = self.battery_voltage["received"]
def cache_system_time(self, msg):
'''
Stores the system time when it is published.
'''
self.system_time["received"] = msg.clock
def monitor_system_time(self):
'''
Updates data related to the system time on a 0.1s QT timer
'''
if (not self.remote.is_timed_out):
# Counts bad values of the received system time towards the timeout count
if ((self.system_time["received"] is None) or (int(str(self.system_time["received"])) <= 0) or
len(str(self.system_time["received"])) < 9):
self.system_time["timeout_count"] += 1
# Updates the displayed data if a new system time has been received since the last timer
elif (self.system_time["received"] != self.system_time["cached"]):
self.system_time["timeout_count"] = 0
self.update_system_time_status()
# Assumes that we have been disconnected if the system timeout counter reaches 50 (5s)
elif (self.system_time["timeout_count"] >= 50):
self.remote.is_timed_out = True
self.kill_status_status.setText("Unknown")
self.kill_status_frame.setStyleSheet(self.colors["red"])
self.operating_mode_status.setText("Unknown")
self.operating_mode_frame.setStyleSheet(self.colors["red"])
self.battery_voltage_status.setText("Unknown")
self.battery_voltage_frame.setStyleSheet(self.colors["red"])
self.battery_voltage["cached_warning_color"] = "red"
self.system_time_status.setText("Unknown")
self.system_time_frame.setStyleSheet(self.colors["red"])
# Otherwise, increments the system timeout counter
else:
self.system_time["timeout_count"] += 1
else:
# If a new system time has been received after a timeout, exit the timeout state
if (self.system_time["received"] != self.system_time["cached"]):
self.kill_status_status.setText("Active")
self.kill_status_frame.setStyleSheet(self.colors["green"])
self.is_killed = False
self.remote.is_timed_out = False
self.system_time["timeout_count"] = 0
self.update_system_time_status()
self.system_time_frame.setStyleSheet(self.colors["green"])
# Schedules the next instance of this method with a QT timer
QtCore.QTimer.singleShot(100, self.monitor_system_time)
def update_system_time_status(self):
'''
Updates the displayed system time status text and color.
'''
time_string = str(self.system_time["received"])
self.system_time_status.setText(time_string[:-9] + "." + time_string[-9:-8] + "s")
# Set the cached system time to the value that was just displayed
self.system_time["cached"] = self.system_time["received"]
def cache_hosts(self, msg):
'''
Converts a published hosts string into a hosts dictionary and stores it
in the hosts 'received' variable.
'''
self.hosts["received"] = msg
self.hosts["stamp"] = rospy.Time.now()
def monitor_hosts(self):
'''
Monitors the network hosts on a | |
= ldata[y]
ydata[p] = (val, dev)
script = '''r=%s\n''' % json.dumps(data)
return script
# -----------------------------------------------------------------------------
def tmdata():
""" Controller to extract tree map data """
MAX_LEVEL = 3 # the lowest level for child lookups
# Requested locations
if not len(request.args):
response.headers["Content-Type"] = "application/json"
return '''sdata={}'''
else:
locations = list({int(a) for a in request.args if a.isdigit()})
sdata = Storage()
# Vulnerability Indicators
indicator_pids = session.s3.indicator_pids
idefaults = [(i, 0) for i in indicator_pids]
# Locations Hierarchy
ltable = s3db.gis_location
parents = list(locations)
children = list(locations)
while parents or children:
query = None
if children:
query = (ltable.id.belongs(children))
if parents:
q = (ltable.parent.belongs(parents))
if query is None:
query = q
else:
query |= q
if query is None:
break
rows = db(query).select(ltable.id,
ltable.name,
ltable.level,
ltable.parent)
next_parents = []
next_children = []
for row in rows:
this = row.id
level = int(row.level[1])
parent = row.parent
if this not in sdata:
sdata[this] = {}
data = sdata[this]
data["n"] = row.name
data["l"] = level
data["f"] = parent
data["p"] = 0
data["i"] = dict(idefaults)
data["x"] = this not in locations
if level > 0 and parent:
if parent in parents and \
level < MAX_LEVEL and \
parent in locations:
pass
#next_parents.append(this)
elif this in children and parent not in sdata:
next_children.append(parent)
parents = next_parents
children = next_children
# Population
if level in ("L0", "L1"):
# Lookup direct
ddtable = s3db.stats_demographic_data
query = (ddtable.location_id.belongs(set(sdata.keys()))) & \
(ddtable.parameter_id == session.s3.population_id)
rows = db(query).select(ddtable.location_id,
ddtable.value,
orderby=~ddtable.date)
location_ids = []
seen = location_ids.append
for row in rows:
location_id = row.location_id
if location_id not in location_ids:
seen(location_id)
sdata[location_id]["p"] = row.value
# Look up aggregates
atable = s3db.vulnerability_aggregate
query = (atable.location_id.belongs(set(sdata.keys()))) & \
(atable.parameter_id == session.s3.population_id)
rows = db(query).select(atable.location_id,
atable.sum,
atable.ward_count,
atable.reported_count,
orderby=~atable.date)
location_ids = []
seen = location_ids.append
for row in rows:
location_id = row.location_id
if location_id not in location_ids:
seen(location_id)
data = sdata[location_id]
if not data["p"]:
data["p"] = row.sum
data["t"] = row.ward_count
data["r"] = row.reported_count
# Calculate ward_count manually for Lx without aggregates
#commune_level = "L%s" % MAX_LEVEL
#for location_id in sdata.keys():
# data = sdata[location_id]
# if "t" not in data:
# data["r"] = 0
# # @ToDo: optimise this to do in-bulk rather than per-record
# data["t"] = len(gis.get_children(location_id, level=commune_level))
# Indicators
query = (atable.location_id.belongs(set(sdata.keys()))) & \
(atable.parameter_id.belongs(indicator_pids))
rows = db(query).select(atable.location_id,
atable.parameter_id,
atable.median)
for row in rows:
location_id = row.location_id
location_data = sdata[location_id]
if "i" not in location_data:
location_data["i"] = dict(idefaults)
location_data["i"][row.parameter_id] = row.median
# Return as script
script = '''sdata=%s\n''' % json.dumps(sdata)
response.headers["Content-Type"] = "application/json"
return script
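# For reference, each entry of the returned sdata dictionary is keyed by
# location id and, per the assignments above, may carry:
#   "n" - location name              "l" - hierarchy level (int)
#   "f" - parent location id         "p" - population value
#   "i" - {indicator parameter_id: median aggregate}
#   "x" - True if the location was not explicitly requested
#   "t" - ward count                 "r" - reported ward count
# Minimal consumption sketch (names are illustrative only):
#
#   for loc_id, loc in sdata.items():
#       print(loc["n"], loc["l"], loc.get("p"))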
# -----------------------------------------------------------------------------
def filter_report(filter_request, loc_id, loc_level):
"""
Helper function to extract the selections from the side panel
and generate a resource filter
"""
vdoc_table = db.vulnerability_document
gtable = db.gis_location
query = (vdoc_table.deleted != True) & \
(vdoc_table.location_id == gtable.id)
if loc_id != -1:
# Don't filter to just next level
#next_loc_level = "L%s" % (int(loc_level[1:]) + 1)
#child_locations = gis.get_children(loc_id, next_loc_level)
child_locations = gis.get_children(loc_id)
if len(child_locations) == 0:
query &= (vdoc_table.location_id == loc_id)
else:
child_ids = [row.id for row in child_locations]
child_ids.append(loc_id) # include the selected location
query &= (vdoc_table.location_id.belongs(child_ids))
else:
# Show the country-level
query &= (gtable.level == "L0")
if filter_request["from_date"]:
query &= (vdoc_table.date >= filter_request["from_date"])
if filter_request["to_date"]:
query &= (vdoc_table.date <= filter_request["to_date"])
document_types = ["vca"]
indicator = (vdoc_table.document_type == "vca")
if "indicator" in filter_request:
document_types.append("indicator")
if "demographics" in filter_request:
document_types.append("demographic")
if "map" in filter_request:
document_types.append("map")
if "images" in filter_request:
document_types.append("image")
if "reports" in filter_request:
document_types.append("other")
if len(document_types) == 1:
query &= (vdoc_table.document_type == "vca")
else:
query &= (vdoc_table.document_type.belongs(document_types))
if "myReports" in filter_request:
user_id = auth.user.id
query &= ((vdoc_table.approved_by == user_id)
| (vdoc_table.created_by == user_id))
if "text" in filter_request and filter_request["text"] != "":
utable = auth.settings.table_user
text = "%%%s%%" % filter_request["text"].lower()
query &= (vdoc_table.location_id == gtable.id)
query &= (vdoc_table.created_by == utable.id)
query &= ((gtable.name.lower().like(text))
| (utable.first_name.lower().like(text))
| (utable.last_name.lower().like(text)))
# Now ensure that all unapproved records are added to the return list
query = ((vdoc_table.deleted != True) & \
(vdoc_table.approved_by == None) & \
(vdoc_table.location_id == gtable.id)
) | (query)
return query
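# Illustrative example of the filter_request dict this helper expects; the
# values are hypothetical, the keys mirror the checks above:
#
#   filter_request = {"from_date": "2012-01-01", "to_date": "2012-12-31",
#                     "indicator": "on", "demographics": "on", "map": "on",
#                     "images": "on", "reports": "on", "myReports": "on",
#                     "text": "district"}
#   query = filter_report(filter_request, loc_id=5, loc_level="L1")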
# -------------------------------------------------------------------------
def report_group(row):
"""
Virtual field to show the group that the report belongs to;
used by vulnerability/report
"""
if "vulnerability_document" in row:
row = row["vulnerability_document"]
# These get i18n later
if row.approved_by is None:
return "Approval pending"
elif row.document_type == "vca":
return "VCA Report"
else:
return "Report"
# -----------------------------------------------------------------------------
def reportDataTable():
"""
Return a dataTable using the selected filter options
"""
from s3.s3data import S3DataTable
vdoc_table = s3db.vulnerability_document
vdoc_table.group = Field.Method("group", report_group)
gtable = db.gis_location
# -------------------------------------------------------------------------
# Set up custom represents
# -------------------------------------------------------------------------
def location_repr(id):
"""
Return the location name (level) wrapped in a span
"""
if not id:
repr_text = messages["NONE"]
else:
row = locations.get(id, None)
if not row:
repr_text = messages.UNKNOWN_OPT
else:
level = loc_labels[row["level"]]
repr_text = "%s (%s)" % (row["name"], level)
return SPAN(repr_text, _class="communeCell")
# -------------------------------------------------------------------------
def submitted_repr(id):
"""
Return the initial of the first name and the complete last name
"""
if not id:
repr_text = T("Imported data")
else:
row = users.get(id, None)
if row:
repr_text = "%s. %s" % (row["first_name"][0], row["last_name"])
else:
repr_text = messages.UNKNOWN_OPT
return repr_text
# -------------------------------------------------------------------------
def approved_repr(id):
"""
Return the initials of the first and the last name
"""
if id is None:
repr_text = APPROVAL_PENDING
elif id == 0:
repr_text = APPROVED
else:
row = users.get(id, None)
if row:
repr_text = T("Approved by %(first_name)s.%(last_name)s") % \
dict(first_name = row["first_name"][0],
last_name = row["last_name"][0])
else:
repr_text = messages.UNKNOWN_OPT
return repr_text
# -------------------------------------------------------------------------
def action_repr(id):
"""
Return the action button for this row
"""
approved = approvals.get(id, None)
if approved is not None:
repr_text = A(VIEW,
_id = id,
_class = "viewButton",
_href = "javascript:viewReportDetails(%s)" % id
)
else:
repr_text = A(REVIEW,
_id = id,
_class = "reviewButton",
_href = "javascript:showReportDetails(%s)" % id
)
repr_text.append(A(CLOSE,
_class = "closeReviewButton",
_href = "javascript:hideReportDetails(%s)" % id
))
return repr_text
filter_request = request.post_vars
loc_level = -1
if filter_request:
loc_id = filter_request.get("location_id", -1)
if loc_id == "-1":
loc_id = -1
if loc_id:
row = db(gtable.id == loc_id).select(gtable.level,
gtable.path,
limitby=(0, 1)
).first()
try:
loc_level = row.level
except:
# Invalid location ID
loc_id = -1
else:
if loc_level == "L0":
L0 = loc_id
else:
L0 = row.path.split("/")[0]
filter = filter_report(filter_request, loc_id, loc_level)
if loc_id == -1:
loc_labels = gis.get_location_hierarchy()
else:
loc_labels = gis.get_location_hierarchy(location=L0)
#############################################################
# Note if list_fields are changed here then they also need
# to be changed in index, where the table is initialised
#############################################################
if loc_level == -1:
loc_list_field = "location_id$L0"
loc_group_field = "gis_location.L0"
elif loc_level == "L0":
loc_list_field = "location_id$L1"
loc_group_field = "gis_location.L1"
elif loc_level == "L1":
loc_list_field = "location_id$L2"
loc_group_field = "gis_location.L2"
elif loc_level == "L2":
loc_list_field = "location_id$L3"
loc_group_field = "gis_location.L3"
elif loc_level == "L3":
loc_list_field = "location_id$L3"
loc_group_field = "gis_location.L3"
# @ToDo: Support countries with L4s/L5s
#elif loc_level == "L4":
# loc_list_field = "location_id$L4"
# loc_group_field = "gis_location.L4"
list_fields = [(T("Action"), "id"),
(T("Date"), "date"),
(T("Location"), "location_id"),
# Field.Method
"group",
loc_list_field,
"document_type",
(T("Submitted by"), "created_by"),
(T("Status"), "approved_by"),
]
# Ensure that we also get the records awaiting approval
resource = s3db.resource("vulnerability_document", unapproved=True)
if filter_request:
resource.add_filter(filter)
totalrows = resource.count()
data = resource.select(list_fields,
orderby=~vdoc_table.date,
limit=None,
count=True,
represent=False,
#raw_data=True
)
filteredrows = data["numrows"]
if filteredrows > 0:
# Do represents in-bulk
# @ToDo: Replace with S3Represents & define before select
approvals = {}
locations = []
lappend = locations.append
users = []
uappend = users.append
rows = data["rows"]
for row in rows:
#raw = row["_row"]
location_id = row["vulnerability_document.location_id"]
if location_id and location_id not in locations:
lappend(location_id)
user_id = row["vulnerability_document.created_by"]
if user_id and user_id not in users:
uappend(user_id)
user_id = row["vulnerability_document.approved_by"]
if user_id:
approvals[row["vulnerability_document.id"]] = user_id
if user_id not in users:
uappend(user_id)
lrows = db(gtable.id.belongs(locations)).select(gtable.id,
gtable.name,
gtable.level,
gtable.L1,
gtable.L2)
locations = lrows.as_dict()
utable = auth.settings.table_user
urows = db(utable.id.belongs(users)).select(utable.id,
utable.first_name,
utable.last_name)
users = urows.as_dict()
APPROVED = T("Approved")
APPROVAL_PENDING = T("Approval pending")
CLOSE = T("Close")
REVIEW = T("Review")
VIEW = T("View")
# Apply represents
date_repr = vdoc_table.date.represent
doc_type_repr = vdoc_table.document_type.represent
for | |
"gari": 14712,
"pou": 14713,
"xer": 14714,
"▁she": 14715,
"▁Kuch": 14716,
"IDA": 14717,
"▁Allar": 14718,
"▁Kaf": 14719,
"adis": 14720,
"imin": 14721,
"▁fake": 14722,
"gina": 14723,
"▁1300": 14724,
"lagen": 14725,
"▁FF": 14726,
"dere": 14727,
"mele": 14728,
"rana": 14729,
"▁more": 14730,
"203": 14731,
"▁Ion": 14732,
"▁Johannes": 14733,
"Best": 14734,
"gí": 14735,
"▁Hamma": 14736,
"898": 14737,
"の": 14738,
"guan": 14739,
"▁Carrer": 14740,
"▁Navarro": 14741,
"4.4": 14742,
"you": 14743,
"▁Finn": 14744,
"▁Lombard": 14745,
"lijst": 14746,
"liv": 14747,
"▁Return": 14748,
"▁road": 14749,
"163": 14750,
"Ỏ": 14751,
"▁European": 14752,
"▁Novgorod": 14753,
"▁Singer": 14754,
"▁Ça": 14755,
"mate": 14756,
"▁Bohemia": 14757,
"▁gamma": 14758,
"83)": 14759,
"bama": 14760,
"inami": 14761,
"▁Kurs": 14762,
"177": 14763,
"date": 14764,
"▁Conrad": 14765,
"▁Prima": 14766,
"▁UTC": 14767,
"▁Vettel": 14768,
"Photo": 14769,
"Des": 14770,
"ions": 14771,
"zuka": 14772,
"▁Dorot": 14773,
"1.6": 14774,
"build": 14775,
"kém": 14776,
"rath": 14777,
"氏": 14778,
"apo": 14779,
"gona": 14780,
"hlo": 14781,
"▁Aby": 14782,
"▁Clu": 14783,
"▁Ost": 14784,
"▁organ": 14785,
"1940": 14786,
"MHz": 14787,
"kom": 14788,
"tano": 14789,
"▁Gateway": 14790,
"▁Industry": 14791,
"Kan": 14792,
"cere": 14793,
"enta": 14794,
"▁Holiday": 14795,
"▁austral": 14796,
"41)": 14797,
"bida": 14798,
"▁Hora": 14799,
"▁sil": 14800,
"167": 14801,
"Tom": 14802,
"issima": 14803,
"▁Russian": 14804,
"BAN": 14805,
"nant": 14806,
"nz": 14807,
"▁Velo": 14808,
"646": 14809,
"dern": 14810,
"▁Zul": 14811,
"▁Gale": 14812,
"lín": 14813,
"▁Radi": 14814,
"tros": 14815,
"▁Croc": 14816,
"▁Mev": 14817,
"009": 14818,
"185": 14819,
"arrow": 14820,
"▁Dit": 14821,
"▁Insight": 14822,
"/06/2018": 14823,
"dze": 14824,
"▁Kura": 14825,
"Mit": 14826,
"Gen": 14827,
"▁Lina": 14828,
"3000": 14829,
"764": 14830,
"▁Host": 14831,
"▁hetero": 14832,
"▁Belg": 14833,
"▁dollar": 14834,
"734": 14835,
"vant": 14836,
"▁Bernardo": 14837,
"▁Kau": 14838,
"▁Pig": 14839,
"Der": 14840,
"▁Hammer": 14841,
"▁gelatin": 14842,
"▁Hubert": 14843,
"Centr": 14844,
"loh": 14845,
"▁Links": 14846,
"sma": 14847,
"▁Vila": 14848,
"fran": 14849,
"▁Friend": 14850,
"▁art": 14851,
"▁rum": 14852,
"▁Augsburg": 14853,
"bei": 14854,
"oke": 14855,
"▁Rajoy": 14856,
"708": 14857,
"facebook": 14858,
"▁Rainbow": 14859,
"▁18-": 14860,
"▁BAS": 14861,
"▁Sind": 14862,
"▁town": 14863,
"nte": 14864,
"55)": 14865,
"▁Poi": 14866,
"▁Scal": 14867,
"22)": 14868,
"ITU": 14869,
"▁Kiri": 14870,
"657": 14871,
"Build": 14872,
"DAC": 14873,
"▁upload": 14874,
"147": 14875,
"dhar": 14876,
"yri": 14877,
"▁Bada": 14878,
"taro": 14879,
"▁Chinese": 14880,
"792": 14881,
"tola": 14882,
"Rock": 14883,
"micro": 14884,
"742": 14885,
"▁j": 14886,
"▁(1991)": 14887,
"▁07:00": 14888,
"▁Mohd": 14889,
"FZ": 14890,
"check": 14891,
"▁Salzburg": 14892,
"▁Rep": 14893,
"▁big": 14894,
"nil": 14895,
"sein": 14896,
"▁Ane": 14897,
"▁Baku": 14898,
"▁Republic": 14899,
"▁foot": 14900,
"ark": 14901,
"▁Navig": 14902,
"uck": 14903,
"▁Kalo": 14904,
"▁Dos": 14905,
"▁1100": 14906,
"▁Blanche": 14907,
"653": 14908,
"▁Ski": 14909,
"15)": 14910,
"60)": 14911,
"chod": 14912,
"gou": 14913,
"▁Tennis": 14914,
"▁Negre": 14915,
"151": 14916,
"13)": 14917,
"▁Budd": 14918,
"▁Monkey": 14919,
"▁pí": 14920,
"Ze": 14921,
"▁Tribun": 14922,
"SSA": 14923,
"hag": 14924,
"wati": 14925,
"ymo": 14926,
"▁Koke": 14927,
"▁Peak": 14928,
"88)": 14929,
"pack": 14930,
"nese": 14931,
"▁fur": 14932,
"▁70-": 14933,
"UC": 14934,
"crates": 14935,
"Ye": 14936,
"▁Cen": 14937,
"736": 14938,
"pang": 14939,
"▁Ces": 14940,
"▁Vil": 14941,
"▁Helm": 14942,
"▁Naj": 14943,
"▁Poor": 14944,
"▁Centro": 14945,
"▁Suh": 14946,
"eren": 14947,
"vey": 14948,
"awi": 14949,
"new": 14950,
"▁mil": 14951,
"chine": 14952,
"iPhone": 14953,
"▁Sarawak": 14954,
"▁Termin": 14955,
"ester": 14956,
"hasa": 14957,
"▁Barn": 14958,
"▁Foot": 14959,
"▁Qan": 14960,
"riz": 14961,
"▁17:00": 14962,
"▁Pí": 14963,
"▁account": 14964,
"31)": 14965,
"644": 14966,
"▁Sko": 14967,
"▁Leopold": 14968,
"Holstein": 14969,
"▁Boer": 14970,
"▁Mug": 14971,
"▁Professional": 14972,
"▁bluetooth": 14973,
"esse": 14974,
"▁Freiburg": 14975,
"▁Hak": 14976,
"▁Ultimate": 14977,
"▁bob": 14978,
"cross": 14979,
"mella": 14980,
"▁Bollywood": 14981,
"▁Fine": 14982,
"865": 14983,
"▁Aurora": 14984,
"▁June": 14985,
"662": 14986,
"pr": 14987,
"▁Million": 14988,
"▁Cine": 14989,
"▁Visual": 14990,
"aldi": 14991,
"xor": 14992,
"▁Blau": 14993,
"▁Elect": 14994,
"▁Kier": 14995,
"696": 14996,
"corr": 14997,
"▁mal": 14998,
"▁samu": 14999,
"173": 15000,
"▁Barak": 15001,
"RK": 15002,
"▁(1990)": 15003,
"▁pub": 15004,
"684": 15005,
"44)": 15006,
"Alb": 15007,
"Daily": 15008,
"969": 15009,
"usi": 15010,
"▁Egypt": 15011,
"hre": 15012,
"▁Stel": 15013,
"▁plan": 15014,
"987": 15015,
"Pierre": 15016,
"Sync": 15017,
"gaz": 15018,
"▁Orig": 15019,
"Shabaab": 15020,
"Direct": 15021,
"lman": 15022,
"dala": 15023,
"▁Coral": 15024,
"▁Haf": 15025,
"▁power": 15026,
"RED": 15027,
"kaze": 15028,
"sted": 15029,
"oder": 15030,
"▁Chemi": 15031,
"▁Federico": 15032,
"kên": 15033,
"loni": 15034,
"▁Ame": 15035,
"▁Lena": 15036,
"▁Mango": 15037,
"▁gris": 15038,
"vision": 15039,
"▁Nit": 15040,
"Den": 15041,
"▁Medicine": 15042,
"brio": 15043,
"maker": 15044,
"nad": 15045,
"yst": 15046,
"rz": 15047,
"▁elegan": 15048,
"▁year": 15049,
"iant": 15050,
"▁Condo": 15051,
"TOS": 15052,
"Touch": 15053,
"▁François": 15054,
"YS": 15055,
"ğ": 15056,
"▁Dru": 15057,
"▁URL": 15058,
"887": 15059,
"etus": 15060,
"▁Account": 15061,
"oslav": 15062,
"trici": 15063,
"inis": 15064,
"▁Imp": 15065,
"▁cari": 15066,
"ETA": 15067,
"dr": 15068,
"▁Common": 15069,
"▁Corona": 15070,
"MIC": 15071,
"ckel": 15072,
"urs": 15073,
"verti": 15074,
"prote": 15075,
"tila": 15076,
"▁Vau": 15077,
"Van": 15078,
"▁Airbnb": 15079,
"▁Reporter": 15080,
"dding": 15081,
"▁xxx": 15082,
"206": 15083,
"oval": 15084,
"▁Kanda": 15085,
"shen": 15086,
"▁Stri": 15087,
"four": 15088,
"▁banner": 15089,
"ized": 15090,
"mbro": 15091,
"▁Bike": 15092,
"869": 15093,
"chten": 15094,
"trin": 15095,
"▁Eh": 15096,
"▁Sales": 15097,
"frame": 15098,
"▁Kolkata": 15099,
"▁Rota": 15100,
"784": 15101,
"yuan": 15102,
"▁Babi": 15103,
"▁Still": 15104,
"Reuters": 15105,
"thyr": 15106,
"cze": 15107,
"vr": 15108,
"tender": 15109,
"AFP": 15110,
"orna": 15111,
"▁PayPal": 15112,
"rma": 15113,
"▁Strong": 15114,
"▁Teo": 15115,
"Ham": 15116,
"anche": 15117,
"▁COM": 15118,
"▁Sharma": 15119,
"35)": 15120,
"case": 15121,
"gula": 15122,
"179": 15123,
"771": 15124,
"International": 15125,
"WG": 15126,
"▁1789": 15127,
"▁metal": 15128,
"sons": 15129,
"viv": 15130,
"▁Desktop": 15131,
"▁RF": 15132,
"dida": 15133,
"▁Gus": 15134,
"▁Program": 15135,
"698": 15136,
"OTO": 15137,
"gwa": 15138,
"noma": 15139,
"sette": 15140,
"vice": 15141,
"▁vec": 15142,
"▁Mila": 15143,
"92)": 15144,
"▁Nacional": 15145,
"▁Saha": 15146,
"▁Yeni": 15147,
"AK": 15148,
"lean": 15149,
"Rus": 15150,
"▁Escape": 15151,
"ński": 15152,
"▁Trek": 15153,
"▁cali": 15154,
"alta": 15155,
"stu": 15156,
"▁Saba": 15157,
"ants": 15158,
"vill": 15159,
"▁boto": 15160,
"Cre": 15161,
"▁Bomb": 15162,
"OOO": 15163,
"RAT": 15164,
"tych": 15165,
"▁Indra": 15166,
"rc": 15167,
"▁Chef": 15168,
"▁Beyond": 15169,
"▁Cliff": 15170,
"▁Maas": 15171,
"▁tau": 15172,
"YH": 15173,
"mpel": 15174,
"▁Olive": 15175,
"▁Vida": 15176,
"rge": 15177,
"sura": 15178,
"▁Bud": 15179,
"▁Madi": 15180,
"usan": 15181,
"▁Nazar": 15182,
"▁0.5": 15183,
"20)": 15184,
"qué": 15185,
"ÁT": 15186,
"nek": 15187,
"▁Oriental": 15188,
"▁Rakh": 15189,
"018": 15190,
"Bad": 15191,
"type": 15192,
"▁Edmund": 15193,
"966": 15194,
"mbar": 15195,
"▁cli": 15196,
"quen": 15197,
"▁Direction": 15198,
"▁Solutions": 15199,
"▁Tourism": 15200,
"▁producer": 15201,
"986": 15202,
"kop": 15203,
"▁[3]": 15204,
"chem": 15205,
"▁Kato": 15206,
"▁Voyage": 15207,
"mpo": 15208,
"vien": 15209,
"Type": 15210,
"99)": 15211,
"samba": 15212,
"▁Flag": 15213,
"chow": 15214,
"▁Jav": 15215,
"▁cine": 15216,
"ades": 15217,
"zela": 15218,
"▁pit": 15219,
"AMI": 15220,
"▁Moni": 15221,
"itas": 15222,
"kiri": 15223,
"▁kip": 15224,
"lara": 15225,
"tiny": 15226,
"▁Rho": 15227,
"▁aero": 15228,
"dho": 15229,
"▁Fru": 15230,
"▁Driver": 15231,
"escent": 15232,
"plast": 15233,
"bed": 15234,
"▁Guitar": 15235,
"other": 15236,
"▁stri": 15237,
"Pfalz": 15238,
"quam": 15239,
"tania": 15240,
"andi": 15241,
"trum": 15242,
"▁Tada": 15243,
"Ỉ": 15244,
"36)": 15245,
"asta": 15246,
"mento": 15247,
"▁Mesa": 15248,
"zam": 15249,
"▁Marta": 15250,
"▁Mio": 15251,
"▁TripAdvisor": 15252,
".01.": 15253,
"kala": 15254,
"863": 15255,
"ias": 15256,
"▁corona": 15257,
"UND": 15258,
"▁Bold": 15259,
"▁Sama": 15260,
"01)": 15261,
"ILA": 15262,
"▁Homo": 15263,
"▁sem": 15264,
"1956": 15265,
"46)": 15266,
"sumi": 15267,
"action": 15268,
"inum": 15269,
"vang": 15270,
"▁Need": 15271,
"305": 15272,
"▁Machi": 15273,
"oper": 15274,
"titude": 15275,
"▁Augustus": 15276,
"▁Kubi": 15277,
":20": 15278,
"Af": 15279,
"wir": 15280,
"1.7": 15281,
"▁Pati": 15282,
"▁Ivo": 15283,
"dec": 15284,
"▁Grass": 15285,
"▁roa": 15286,
"versa": 15287,
"album": 15288,
"lur": 15289,
"▁Chic": 15290,
"153": 15291,
"gado": 15292,
"▁CEN": 15293,
"▁Natural": 15294,
"▁Warszawa": 15295,
"êrê": 15296,
"▁Bate": 15297,
"▁PAN": 15298,
"▁Suisse": 15299,
"▁PowerPoint": 15300,
"▁Rail": 15301,
"▁food": 15302,
"▁house": 15303,
"sena": 15304,
"▁Senti": 15305,
"2.4": 15306,
"lulu": 15307,
"▁Alpin": 15308,
"944": 15309,
"gent": 15310,
"▁CHÍNH": 15311,
"▁vote": 15312,
"vania": 15313,
"▁Naruto": 15314,
"DIG": 15315,
"tree": 15316,
"▁Elise": 15317,
"tzen": 15318,
"▁Aureli": 15319,
"izen": 15320,
"▁Kerala": 15321,
"▁side": 15322,
"LI": 15323,
"TTE": 15324,
"xus": 15325,
"▁firmware": 15326,
"29)": 15327,
"holm": 15328,
"172": 15329,
"▁got": 15330,
"18)": 15331,
"▁BU": 15332,
"▁KG": 15333,
"▁Licht": 15334,
"▁Private": 15335,
"32)": 15336,
"pov": 15337,
"▁Tir": 15338,
"09)": 15339,
"pano": 15340,
"▁Rab": 15341,
"XY": 15342,
"loop": 15343,
"▁Reuni": 15344,
"7.2": 15345,
"ALA": 15346,
"▁Uk": 15347,
"▁López": 15348,
"▁Bene": 15349,
"▁Bola": 15350,
"▁Bonne": 15351,
| |
<filename>pip_tests.py
# Source code to reproduce the results given in the paper:
# "A Novel Point Inclusion Test for Convex Polygons Based on Voronoi Tessellations"
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import matplotlib.pyplot as plt
import time
from math import sin, cos, pi
import timeit
from numpy.random import default_rng
import pickle
# seed the random generator
rg = default_rng(12345)
# Timeit constants
REPEAT = 10
NUMBER = 10
# Time scale of the results
TIME_SCALE = 1e-9 # 1ns
# Test polygon generation constants
center_of_poly = (3.0, 3.0)
radius_of_poly = 10.0
rotation_of_poly = pi/6
# Number of edges for testing
polygon_test_sizes = range(3, 16)
# Number of test points
N_TEST_POINTS = 1000000
# For readability
X = 0
Y = 1
def calculate_centroid(poly):
"""Calculates centroid of a polygon
Args:
poly (ndarray): Vertices of the polygon, (2,n)
Returns:
ndarray: centroid, (2,)
"""
v = poly # vertices
v_r = np.roll(v, 1, axis=1) # rolled vertices
a = v_r[X] * v[Y] - v[X] * v_r[Y]
area = a.sum() / 2
centroid = ((v + v_r) @ a) / (6 * area)
return centroid
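# Quick sanity check of the shoelace-based centroid above (illustrative only):
#
#   square = np.array([[0.0, 2.0, 2.0, 0.0],
#                      [0.0, 0.0, 2.0, 2.0]])
#   calculate_centroid(square)   # -> array([1., 1.])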
def calculate_generators(poly):
"""Calculates voronoi generator points as a centroidal voronoi polygon
Args:
poly (ndarray): Vertices of the polygon, (2,n)
Returns:
ndarray: Generator points, (2,(n+1))
"""
p_0 = calculate_centroid(poly)
v = poly # vertices
v_r = np.roll(v, 1, axis=1) # rolled vertices
a = v[Y] - v_r[Y]
b = v_r[X] - v[X]
c = - (a * v[X] + b * v[Y])
w = np.array([[b**2 - a**2, -2 * a * b],
[-2 * a * b, a**2 - b**2]])
p_k = (np.einsum('ijk,j', w, p_0) - 2 * c * np.array([a, b])) / (a**2 + b**2)
return np.hstack((p_0.reshape(-1,1), p_k))
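# Geometric note: each generator p_k above is the mirror image of the centroid
# p_0 across the supporting line a*x + b*y + c = 0 of the k-th edge,
#
#   p_k = (W @ p_0 - 2*c*[a, b]) / (a**2 + b**2),
#   W   = [[b**2 - a**2, -2*a*b], [-2*a*b, a**2 - b**2]],
#
# so the polygon is exactly the Voronoi cell of p_0 within the generator set
# {p_0, p_1, ..., p_n}: a point is inside iff it is closer to p_0 than to
# every reflected generator, which is the test performed in voronoi() below.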
def voronoi(points, poly):
"""Voronoi point inclusion test
Args:
points (ndarray): Test points, (2,m)
poly (ndarray): Vertices of the polygon, (2,n)
Returns:
ndarray(dtype=bool): Result of the point inclusion test, (m,)
"""
generators = calculate_generators(poly)
x_minus_p = points[:,np.newaxis,:] - generators[...,np.newaxis]
metrics = np.einsum("ijk,ijk->jk", x_minus_p, x_minus_p)
result = (metrics[0:1] < metrics[1:]).all(axis=0)
return result
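# Minimal usage sketch with illustrative values:
#
#   tri = np.array([[0.0, 4.0, 0.0],
#                   [0.0, 0.0, 4.0]])   # right triangle
#   pts = np.array([[1.0, 5.0],
#                   [1.0, 5.0]])        # (1,1) inside, (5,5) outside
#   voronoi(pts, tri)                   # -> array([ True, False])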
def crossing(points, poly):
"""Ray crossings point inclusion test
Args:
points (ndarray): Test points, (2,m)
poly (ndarray): Vertices of the polygon, (2,n)
Returns:
ndarray(dtype=bool): Result of the point inclusion test, (m,)
"""
q = points[:,np.newaxis,:] # queried points
v = poly[...,np.newaxis] # vertices
vr = np.roll(v, 1, axis=1) # rolled vertices
v_delta = v - vr # differences between successive vertices
in_range = np.logical_xor(v[Y] > q[Y], vr[Y] > q[Y])
going_up = v[Y] > vr[Y]
lhs = q[Y] * v_delta[X] - q[X] * v_delta[Y]
rhs = vr[Y] * v_delta[X] - vr[X] * v_delta[Y]
on_left = np.where(going_up, lhs > rhs, lhs < rhs)
crossings = np.logical_and(in_range, on_left)
result = (crossings.sum(axis=0) % 2) != 0
return result
def sign_of_offset(points, poly):
"""Sign of offset point inclusion test
Args:
points (ndarray): Test points, (2,m)
poly (ndarray): Vertices of the polygon, (2,n)
Returns:
ndarray(dtype=bool): Result of the point inclusion test, (m,)
"""
q = points[:,np.newaxis,:] # queried points
v = poly[...,np.newaxis] # vertices
vr = np.roll(v, 1, axis=1) # rolled vertices
v_delta = v - vr # differences between successive vertices
lhs = q[Y] * v_delta[X] - q[X] * v_delta[Y]
rhs = vr[Y] * v_delta[X] - vr[X] * v_delta[Y]
# Check if all True or all False, no mix
result = (lhs < rhs).sum(axis=0) % poly.shape[1] == 0
return result
def transform(poly, xytheta):
"""Affine transformation of the polygon
Args:
poly (ndarray): Vertices of the polygon, (2,n)
xytheta (tuple): x, y, theta
Returns:
(ndarray): Vertices of transformed polygon, (2,n)
"""
augmented = np.vstack((poly, np.ones(poly.shape[1])))
x, y, t = xytheta
transform = np.asarray([[cos(t), -sin(t), x],
[sin(t), cos(t), y],
[0, 0, 1]])
tfed_poly = transform @ augmented
return tfed_poly[0:2,:]
def visualize_test(points, poly, result, title, save=False, plot_generators=False):
plt.figure(figsize=(5, 5))
plt.title("{} edges {}".format(poly.shape[1], title))
plt.axis('equal')
plt.subplots_adjust(left=0.08, right=0.95, bottom=0.08, top=0.92)
plt.fill(poly[0,:], poly[1,:], "#75bbfd")
xx = points[0,:]
yy = points[1,:]
plt.plot(xx[result], yy[result], 'ro', markersize=1)
plt.plot(xx[np.logical_not(result)], yy[np.logical_not(result)], 'ko', markersize=1)
if plot_generators:
generators = calculate_generators(poly)
plt.plot(generators[0], generators[1], 'go', markersize=7)
if save:
plt.savefig("{} edges {}.pdf".format(poly.shape[1], title))
else:
plt.show()
def create_convex_poly(n_vertices=7, radius=1.0, center=(0.0,0.0)):
theta = np.linspace(0, 2*pi, n_vertices, False)
vertices = radius * np.array([np.cos(theta), np.sin(theta)]) + np.array(center).reshape(2,1)
return vertices
def generate_timing_plot(all_results):
methods = {"ray crossing", "sign of offset", "voronoi"}
fig, ax = plt.subplots(figsize=(5,5))
linestyles = ['-.', '--', '-', ':']
ax.set_title("Tests for {} points".format(N_TEST_POINTS))
ax.set_yscale("log")
ax.grid(True, 'both')
ax.set_xlabel("Number of edges")
ax.set_ylabel("Per point processing time (ns)")
for i, method in enumerate(methods):
x = []
y = []
for idx, n_vertices in enumerate(all_results.keys()):
x.append(n_vertices)
y.append(all_results[n_vertices][method])
ax.plot(x, y, label=method, linestyle=linestyles[i], linewidth=3)
ax.legend()
fig.tight_layout()
fig.savefig("experimental.pdf")
fig.savefig("experimental.png")
def experimental_results():
all_results = {}
for idx, n_vertices in enumerate(polygon_test_sizes):
timings = {"ray crossing" : {},
"sign of offset" : {},
"voronoi" : {}}
poly = create_convex_poly(n_vertices=n_vertices, radius=radius_of_poly)
tfed_poly = transform(poly, (*center_of_poly, rotation_of_poly))
test_points = (rg.random((2, N_TEST_POINTS)) - 0.5) * 3 * radius_of_poly + np.array(center_of_poly).reshape(2,1)
# ################ RAY CROSSING TEST ################
timer_crossing = timeit.Timer(lambda : crossing(test_points, tfed_poly))
crossing_time = timer_crossing.repeat(repeat=REPEAT, number=NUMBER)
crossing_time = np.min(crossing_time) / NUMBER / N_TEST_POINTS / TIME_SCALE
print("Timing with ray crossing for {} vertices, {} points: {} ns/pt".format(n_vertices, N_TEST_POINTS, crossing_time))
timings["ray crossing"] = crossing_time
# ################ SIGN OF OFFSET TEST ################
timer_signoff = timeit.Timer(lambda : sign_of_offset(test_points, tfed_poly))
signoff_time = timer_signoff.repeat(repeat=REPEAT, number=NUMBER)
signoff_time = np.min(signoff_time) / NUMBER / N_TEST_POINTS / TIME_SCALE
print("Timing with sign of offset for {} vertices, {} points: {} ns/pt".format(n_vertices, N_TEST_POINTS, signoff_time))
timings["sign of offset"] = signoff_time
# ################ VORONOI TEST ################
timer_voronoi = timeit.Timer(lambda : voronoi(test_points, tfed_poly))
voronoi_time = timer_voronoi.repeat(repeat=REPEAT, number=NUMBER)
voronoi_time = np.min(voronoi_time) / NUMBER / N_TEST_POINTS / TIME_SCALE
print("Timing with voronoi for {} vertices, {} points: {} ns/pt".format(n_vertices, N_TEST_POINTS, voronoi_time))
timings["voronoi"] = voronoi_time
all_results[n_vertices] = timings
return all_results
def near_poly_sample(poly, n_points, k_nearness=0.1):
v = poly
vr = np.roll(v, 1, axis=1)
a = v[1] - vr[1]
b = vr[0] - v[0]
c = - (a * v[0] + b * v[1])
x = (rg.random((2,n_points)) - 0.5) * 2 * (1 + 2 * k_nearness) * radius_of_poly + np.array(center_of_poly).reshape(2,1)
centroid = calculate_centroid(poly)
in_radius = np.linalg.norm(x - centroid.reshape(2,1), axis=0) < ((1 + k_nearness) * radius_of_poly)
x = x[:,in_radius]
# https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line
# d = (ax + by + c) / sqrt(a^2 + b^2)
d = (np.outer(a, x[0]) + np.outer(b, x[1]) + c.reshape(-1,1)) / np.sqrt(a**2 + b**2).reshape(-1,1)
x_near = x[:,(np.abs(d) < (k_nearness * radius_of_poly)).any(axis=0)]
return x_near
def correctness_test():
poly = create_convex_poly(n_vertices=5, radius=radius_of_poly)
tfed_poly = transform(poly, (*center_of_poly, rotation_of_poly))
test_points = near_poly_sample(tfed_poly, 10000, 0.5)
c_test = crossing(test_points, tfed_poly)
v_test = voronoi(test_points, tfed_poly)
if (c_test != v_test).sum() == 0:
# Test passed
visualize_test(test_points, tfed_poly, voronoi(test_points, tfed_poly), "voronoi", True, True)
else:
print("Correctness test failed")
def main():
all_results = experimental_results()
with open('exp_results.pickle', 'wb') as f:
pickle.dump(all_results, f, pickle.HIGHEST_PROTOCOL)
# with open('exp_results.pickle', 'rb') as f:
# all_results | |
""" show_isis.py
IOSXE parsers for the following show commands:
* show isis neighbors
* show isis hostname
* show isis lsp-log
* show isis database
* show isis database detail
* show isis database verbose
* show isis node
* show isis topology
* show isis topology {flex_algo}
* show isis flex-algo
* show isis flex-algo {flex_algo}
* show isis adjacency stagger
* show isis adjacency stagger all
* show isis adjacency stagger detail
* show isis rib
* show isis rib {flex_algo}
* show isis rib {source_ip}
* show isis rib {source_ip} {subnet_mask}
* show isis rib redistribution
"""
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional, Or
from genie.libs.parser.utils.common import Common
class ShowIsisNeighborsSchema(MetaParser):
"""Schema for show isis neighbors"""
schema = {
'isis': {
Any(): {
Optional('neighbors'): {
Any(): {
'type': {
Any(): {
'interfaces': {
Any(): {
'circuit_id': str,
'holdtime': str,
'ip_address': str,
'state': str,
}
}
}
}
}
},
}
}
}
class ShowIsisNeighbors(ShowIsisNeighborsSchema):
"""Parser for show isis neighbors"""
cli_command = 'show isis neighbors'
exclude = ['holdtime']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
ret_dict = {}
tag_null = True
for line in out.splitlines():
line = line.strip()
# Tag isis_net:
p1 = re.compile(r'^Tag +(?P<isis_name>\S+)\s*:$')
m = p1.match(line)
if m:
isis_name = m.groupdict()['isis_name']
isis_dict = ret_dict.setdefault('isis', {}).setdefault(isis_name, {})
tag_null = False
continue
# LAB-9001-2 L1 Te0/0/26 10.239.7.29 UP 27 00
p2 = re.compile(r'^(?P<system_id>\S+)\s+(?P<type>\S+)\s+(?P<interface>\S+)\s+'
'(?P<ip_address>\S+)\s+(?P<state>(UP|DOWN|INIT|NONE)+)\s+'
'(?P<holdtime>\S+)\s+(?P<circuit_id>\S+)$')
m = p2.match(line)
if m:
system_id = m.groupdict()['system_id']
isis_type = m.groupdict()['type']
if tag_null:
neighbour_dict = ret_dict.setdefault('isis', {}).setdefault('null', {}).\
setdefault('neighbors', {}).setdefault(system_id, {})
else:
neighbour_dict = isis_dict.setdefault('neighbors', {}).setdefault(system_id, {})
type_dict = neighbour_dict.setdefault('type', {}).setdefault(isis_type, {})
interface_name = Common.convert_intf_name(m.groupdict()['interface'])
interfaces_dict = type_dict.setdefault('interfaces', {}).setdefault(interface_name, {})
interfaces_dict['ip_address'] = m.groupdict()['ip_address']
interfaces_dict['state'] = m.groupdict()['state']
interfaces_dict['holdtime'] = m.groupdict()['holdtime']
interfaces_dict['circuit_id'] = m.groupdict()['circuit_id']
continue
return ret_dict
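# Typical invocation through Genie/pyATS (device and testbed names are
# illustrative; parse() dispatches to this class by CLI command):
#
#   from genie.testbed import load
#   testbed = load('testbed.yaml')
#   device = testbed.devices['R1']
#   device.connect()
#   parsed = device.parse('show isis neighbors')
#   # parsed follows ShowIsisNeighborsSchema, e.g.
#   # parsed['isis']['test']['neighbors']['R2']['type']['L2']['interfaces'][...]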
class ShowIsisHostnameSchema(MetaParser):
"""Schema for show isis hostname"""
schema = {
'tag': {
Any(): {
Optional('hostname_db'): {
'hostname': {
Any(): {
'hostname': str,
Optional('level'): int,
Optional('local_router'): bool,
},
}
}
},
}
}
class ShowIsisHostname(ShowIsisHostnameSchema):
"""Parser for show isis hostname"""
cli_command = 'show isis hostname'
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
result_dict = {}
# Level System ID Dynamic Hostname (VRF1)
p1 = re.compile(r'^Level +System +ID +Dynamic +Hostname +'
r'\((?P<tag>\w+)\)$')
# 2 7777.77ff.eeee R7
# * 2222.22ff.4444 R2
# * 2001:0db8:85a3:0000:0000:8a2e:0370:7334.
p2 = re.compile(r'^(?P<level>\d+)?(\s?(?P<star>\*))? +'
r'(?P<system_id>[a-zA-Z\d\.\:]+) +(?P<dynamic_hostname>\w+)$')
for line in out.splitlines():
line = line.strip()
# Level System ID Dynamic Hostname (VRF1)
m = p1.match(line)
if m:
group = m.groupdict()
tag_dict = result_dict.setdefault('tag', {})\
.setdefault(group['tag'],{})
continue
# 2 7777.77ff.eeee R7
# * 2001:0db8:85a3:0000:0000:8a2e:0370:7334.
m = p2.match(line)
if m:
group = m.groupdict()
hostname_dict = tag_dict.setdefault('hostname_db', {}).\
setdefault('hostname', {}).\
setdefault(group['system_id'], {})
hostname_dict.update({'hostname': group['dynamic_hostname']})
if group['level']:
hostname_dict.update({'level': int(group['level'])})
if group['star']:
hostname_dict.update({'local_router': True})
continue
return result_dict
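# Sketch of the structure this parser returns, shaped by ShowIsisHostnameSchema
# above; the values are taken from the sample lines in the comments:
#
#   {'tag': {'VRF1': {'hostname_db': {'hostname': {
#       '7777.77ff.eeee': {'hostname': 'R7', 'level': 2},
#       '2222.22ff.4444': {'hostname': 'R2', 'local_router': True}}}}}}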
class ShowIsisLspLogSchema(MetaParser):
"""Schema for show isis lsp-log"""
schema = {
'tag': {
Any(): {
'lsp_log': {
'level': {
Any(): {
'index': {
Any(): {
'triggers': str,
'when': str,
'count': int,
Optional('interface'): str,
},
},
},
},
}
},
}
}
class ShowIsisLspLog(ShowIsisLspLogSchema):
"""Parser for show isis lsp-log"""
cli_command = 'show isis lsp-log'
exclude = ['when']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
result_dict = {}
# Tag VRF1:
p1 = re.compile(r'^Tag +(?P<tag>\w+):$')
# Level 1 LSP log
p2 = re.compile(r'^Level +(?P<level>\d+) +LSP +log$')
# When Count Interface Triggers
# 01:13:52 5 CONFIG OTVINFOCHG
# 00:25:46 2 GigabitEthernet4 NEWADJ DIS
p3 = re.compile(r'^(?P<when>[\w\:]+) +(?P<count>\d+)( +(?P<interface>[a-zA-Z]+[\d/.]+))? +(?P<triggers>[\S\ ]+)$')
tag = "none"
for line in out.splitlines():
line = line.strip()
# Tag VRF1:
m = p1.match(line)
if m:
group = m.groupdict()
tag = group['tag']
continue
# Level 1 LSP log
m = p2.match(line)
if m:
group = m.groupdict()
level = int(group['level'])
index = 1
continue
# When Count Interface Triggers
# 01:13:52 5 CONFIG OTVINFOCHG
# 00:25:46 2 GigabitEthernet4 NEWADJ DIS
m = p3.match(line)
if m:
group = m.groupdict()
tag_dict = result_dict.setdefault('tag', {}).\
setdefault(tag, {}).\
setdefault('lsp_log', {}). \
setdefault('level', {}). \
setdefault(level, {}). \
setdefault('index', {}).\
setdefault(index, {})
tag_dict.update({'when': group['when']})
tag_dict.update({'count': int(group['count'])})
tag_dict.update({'triggers': group['triggers']})
if group['interface']:
tag_dict.update({'interface': group['interface']})
index += 1
continue
return result_dict
class ShowIsisDatabaseSchema(MetaParser):
"""Schema for show isis database detail"""
schema = {
'tag': {
Any(): {
'level': {
Any(): {
Any(): {
'lsp_sequence_num': str,
'lsp_checksum': str,
Optional('local_router'): bool,
'lsp_holdtime': str,
Optional('lsp_rcvd'): str,
Optional('lsp_index'): int,
'attach_bit': int,
'p_bit': int,
'overload_bit': int,
Optional('area_address'): str,
Optional('router_id'): str,
Optional("router_cap"): str,
Optional("d_flag"): bool,
Optional("s_flag"): bool,
Optional('nlpid'): str,
Optional('topology'): {
Any(): {
'code': str,
},
},
Optional('hostname'): str,
Optional('ip_address'): str,
Optional('ipv6_address'): str,
Optional(Or("is_neighbor", "extended_is_neighbor", "mt_is_neighbor")): {
Any(): {
"neighbor_id": str,
"metric": int,
Optional("adjacency_sid"): {
Any() :{
"f_flag": bool,
"b_flag": bool,
"v_flag": bool,
"l_flag": bool,
"s_flag": bool,
"p_flag": bool,
"weight": int
}
},
Optional("local_interface_id"): int,
Optional("remote_interface_id"): int,
Optional("interface_ip_address"): str,
Optional("neighbor_ip_address"): str,
Optional("interface_ipv6_address"): str,
Optional("neighbor_ipv6_address"): str,
Optional("physical_link_bw"): int,
Optional("admin_weight"): int,
Optional('reservable_global_pool_bw'): int,
Optional('unreserved_global_pool_bw'): {
'bw_0': int,
'bw_1': int,
'bw_2': int,
'bw_3': int,
'bw_4': int,
'bw_5': int,
'bw_6': int,
'bw_7': int,
},
Optional('uni_link_delay_avg'): {
'a_bit': bool,
'value': int,
},
Optional('uni_link_delay_min_max'): {
'a_bit': bool,
'min': int,
'max': int,
},
Optional('uni_link_delay_var'): int,
Optional('uni_link_loss'): {
'percent': str,
'anomalous': bool,
},
Optional("affinity"): str,
Optional("extended_affinity"): list,
Optional("asla"): {
"l_flag": bool,
"sa_length": int,
"uda_length": int
},
Optional("standard_application"): {
Any(): {
Optional("bit_mask"): str,
Optional("appl_spec_ext_admin_group"): list,
Optional("appl_spec_admin_group"): str,
Optional('appl_spec_uni_link_loss'): {
'percent': str,
'anomalous': bool,
},
Optional("appl_spec_uni_link_delay"): {
"a_bit": bool,
"min": int,
"max": int
}
}
}
},
},
Optional(Or("ipv4_interarea_reachability", "ipv4_internal_reachability", "mt_ipv6_reachability", "ipv6_reachability")): {
Any(): {
"ip_prefix": str,
"prefix_len": str,
"metric": int,
Optional("source_router_id"): str,
Optional("route_admin_tag"): int,
Optional("prefix_attr"): {
"x_flag": bool,
"r_flag": bool,
"n_flag": bool,
},
Optional("prefix_sid_index"): {
Any() : {
Optional("algorithm"): str,
Optional("flex_algo"): int,
Optional("flags"): {
"r_flag": bool,
"n_flag": bool,
"p_flag": bool,
"e_flag": bool,
"v_flag": bool,
"l_flag": bool,
}
}
}
},
},
Optional("flex_algo"): {
Any() : {
"metric_type": str,
"alg_type": str,
"priority": int,
Optional("m_flag"): bool,
Optional("exclude_any"): Any(),
Optional("include_any"): Any(),
Optional("include_all"): Any(),
},
},
Optional("segment_routing"): {
"spf": bool,
"strict_spf": bool,
"i_flag": bool,
"v_flag": bool,
"srgb_base": int,
"srgb_range": int,
"srlb_base": int,
"srlb_range": int,
"algorithms": set
},
Optional("node_msd"): int,
},
}
}
}
}
}
class ShowIsisDatabaseSuperParser(ShowIsisDatabaseSchema):
"""
Super Parser for
show isis database
show isis database detail
show isis database verbose
"""
cli_command = 'show isis database detail'
exclude = ['lsp_holdtime', 'lsp_checksum', 'lsp_sequence_num']
def cli(self, output=None):
if output is None:
out = self.device.execute(self.cli_command)
else:
out = output
# initial return dictionary
result_dict = {}
tag = ""
# Tag VRF1:
p1 = re.compile(r'^Tag +(?P<tag>\w+):$')
# IS-IS Level-1 Link State Database:
# IS-IS Level-1 LSP r1.00-00
p2 = re.compile(r'^IS\-IS +Level\-(?P<level>\d+)\s+'
r'(Link +State +Database(:)?)?(LSP\s+(?P<host_name>\S+))?$')
# LSPID LSP Seq Num LSP Checksum LSP Holdtime/Rcvd ATT/P/OL
# R2.00-00 * 0x00000007 0x8A6D 403/* 1/0/0
p3 = re.compile(
r'^(?P<lspid>[\w\-\.]+)(\s*(?P<star>\*))?\s+(?P<lsp_seq_num>\w+)\s+'
r'(?P<lsp_checksum>\w+)\s+(?P<lsp_holdtime>[\d\*]+)'
r'(/(?P<lsp_rcvd>[\d\*]+))?\s+(?P<att>\d+)/(?P<p>\d+)/(?P<ol>\d+)\s*'
r'(\((?P<lsp_index>\d+)\))?$')
# Area Address: 49.0001
p4 = re.compile(r'^Area +Address: +(?P<area_address>[\w\.]+)$')
# NLPID: 0xCC 0x8E
p5 = re.compile(r'^NLPID: +(?P<nlp_id>[\w\s]+)$')
# Topology: IPv4 (0x0)
# IPv6 (0x4002 ATT)
p6 = re.compile(r'^(Topology: +)?(?P<topology>(IP)+[\w]+) +\((?P<code>[\w\s]+)\)$')
# Hostname: R2
p7 = re.compile(r'^Hostname: +(?P<hostname>\w+)$')
# IP Address: 10.84.66.66
p8 = re.compile(r'^IP +Address: +(?P<ip_address>[\d\.]+)$')
# Metric: 10 IS R2.01
# Metric: 10 IP 10.229.7.0/24
# Metric: 40 IS (MT-IPv6) R2.01
# Metric: 40 IS-Extended R2.01
# Metric: 10 IPv6 2001:DB8:2:2:2::2/128
# Metric: 10 IPv6 (MT-IPv6) 2001:DB8:20:2::/64
p9 = re.compile(r'^Metric: +(?P<metric>\d+) +(?P<type>[\w\-]+)( +\((?P<mt_ipv6>[\w\-]+)\))? +(?P<ip>\S+)$')
# IPv6 Address: 2001:DB8:66:66:66::66
p10 = re.compile(r'^IPv6 +Address: +(?P<ip_address>[\w\:]+)$')
# Router ID: 10.1.77.77
p11 = re.compile(r'^Router +ID: +(?P<router_id>\S+)$')
# Flex algorithm: 128 Metric-Type: IGP Alg-type: SPF Priority: 128
p12 = re.compile(r'^Flex algorithm:\s+(?P<flex_algo>\d+)\s+Metric-Type:\s+'
r'(?P<metric_type>\S+)\s+Alg-type:\s+(?P<alg_type>\S+)\s+'
r'Priority:\s+(?P<priority>\d+)$')
# M:1
p13 = re.compile(r'^M:\s*(?P<m_flag>0|1)$')
# Router CAP: 1.1.1.1, D:0, S:0
p14 = re.compile(r'^Router CAP:\s+(?P<router_cap>[\d+\.]+),\s+D:\s*(?P<d_flag>0|1)'
r',\s+S:\s*(?P<s_flag>0|1)$')
# Segment Routing: I:1 V:0, SRGB Base: 16000 Range: 8000
p15 = re.compile(r'^Segment\s+Routing:\s+I:(?P<i_flag>0|1)\s+'
r'V:(?P<v_flag>0|1),\s+SRGB\s+Base:\s+(?P<srgb_base>\d+)'
r'\s+Range:\s+(?P<srgb_range>\d+)$')
# Segment Routing Local Block: SRLB Base: 15000 Range: 1000
p16 = re.compile(r'^Segment\s+Routing\s+Local\s+Block:\s+SRLB Base:\s+'
r'(?P<srlb_base>\d+)\s+Range:\s+(?P<srlb_range>\d+)$')
# Segment Routing Algorithms: SPF, Strict-SPF, Flex-algo 128
p17 = re.compile(r'^Segment\s+Routing\s+Algorithms:\s+((?P<spf>SPF)?)'
r',\s+((?P<strict_spf>Strict-SPF)?),\s+Flex-algo\s+'
r'(?P<flex_id>\d+)$')
# Segment Routing Algorithms: Flex-algo 129, Flex-algo 130, Flex-algo 131
p18 = re.compile(r'^Segment\s+Routing\s+Algorithms:\s+Flex-algo\s+'
r'(?P<flex_id_1>\d+)((,\s+Flex-algo\s+(?P<flex_id_2>\d+))?)'
r'((,\s+Flex-algo\s+(?P<flex_id_3>\d+))?)$')
# Node-MSD
# | |
<reponame>google-cloud-sdk-unofficial/google-cloud-sdk
"""Generated client library for sddc version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.sddc.v1alpha1 import sddc_v1alpha1_messages as messages
class SddcV1alpha1(base_api.BaseApiClient):
"""Generated client library for service sddc version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://sddc.googleapis.com/'
MTLS_BASE_URL = 'https://sddc.mtls.googleapis.com/'
_PACKAGE = 'sddc'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'SddcV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new sddc handle."""
url = url or self.BASE_URL
super(SddcV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_clusterGroupBackups = self.ProjectsLocationsClusterGroupBackupsService(self)
self.projects_locations_clusterGroups_clusters = self.ProjectsLocationsClusterGroupsClustersService(self)
self.projects_locations_clusterGroups_ipAddresses = self.ProjectsLocationsClusterGroupsIpAddressesService(self)
self.projects_locations_clusterGroups = self.ProjectsLocationsClusterGroupsService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsClusterGroupBackupsService(base_api.BaseApiService):
"""Service class for the projects_locations_clusterGroupBackups resource."""
_NAME = 'projects_locations_clusterGroupBackups'
def __init__(self, client):
super(SddcV1alpha1.ProjectsLocationsClusterGroupBackupsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""`ClusterGroupBackup` is functional. A completed `longrunning.Operation` contains the new `ClusterGroupBackup` object in the response field. The returned operation is automatically deleted after a few hours, so there is no need to call `operations.delete`.
Args:
request: (SddcProjectsLocationsClusterGroupBackupsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroupBackups',
http_method='POST',
method_id='sddc.projects.locations.clusterGroupBackups.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['clusterGroupBackupId', 'requestId'],
relative_path='v1alpha1/{+parent}/clusterGroupBackups',
request_field='clusterGroupBackup',
request_type_name='SddcProjectsLocationsClusterGroupBackupsCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a `ClusterGroupBackup`.
Args:
request: (SddcProjectsLocationsClusterGroupBackupsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroupBackups/{clusterGroupBackupsId}',
http_method='DELETE',
method_id='sddc.projects.locations.clusterGroupBackups.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupBackupsDeleteRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single `ClusterGroupBackup`.
Args:
request: (SddcProjectsLocationsClusterGroupBackupsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ClusterGroupBackup) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroupBackups/{clusterGroupBackupsId}',
http_method='GET',
method_id='sddc.projects.locations.clusterGroupBackups.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupBackupsGetRequest',
response_type_name='ClusterGroupBackup',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists `ClusterGroupBackup` objects in a given project and location (region).
Args:
request: (SddcProjectsLocationsClusterGroupBackupsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListClusterGroupBackupsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroupBackups',
http_method='GET',
method_id='sddc.projects.locations.clusterGroupBackups.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/clusterGroupBackups',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupBackupsListRequest',
response_type_name='ListClusterGroupBackupsResponse',
supports_download=False,
)
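# Illustrative use of this service through the generated client; the parent
# string is a placeholder and credential handling is left to the apitools
# defaults:
#
#   client = SddcV1alpha1()
#   request = messages.SddcProjectsLocationsClusterGroupBackupsListRequest(
#       parent='projects/my-project/locations/us-central1')
#   response = client.projects_locations_clusterGroupBackups.List(request)
#   # response is a ListClusterGroupBackupsResponse message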
class ProjectsLocationsClusterGroupsClustersService(base_api.BaseApiService):
"""Service class for the projects_locations_clusterGroups_clusters resource."""
_NAME = 'projects_locations_clusterGroups_clusters'
def __init__(self, client):
super(SddcV1alpha1.ProjectsLocationsClusterGroupsClustersService, self).__init__(client)
self._upload_configs = {
}
def AddNodes(self, request, global_params=None):
r"""Add bare metal nodes to a cluster.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersAddNodesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('AddNodes')
return self._RunMethod(
config, request, global_params=global_params)
AddNodes.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters/{clustersId}:addNodes',
http_method='POST',
method_id='sddc.projects.locations.clusterGroups.clusters.addNodes',
ordered_params=['cluster'],
path_params=['cluster'],
query_params=[],
relative_path='v1alpha1/{+cluster}:addNodes',
request_field='addNodesRequest',
request_type_name='SddcProjectsLocationsClusterGroupsClustersAddNodesRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Create(self, request, global_params=None):
r"""Creates a new cluster in a given cluster group. The creation is asynchronous. You can check the returned operation to track its progress. When the operation successfully completes, the cluster has a a **READY** status and is fully functional. The returned operation is automatically deleted after a few hours, so there is no need to call `operations.delete`.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters',
http_method='POST',
method_id='sddc.projects.locations.clusterGroups.clusters.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['clusterId', 'managementCluster'],
relative_path='v1alpha1/{+parent}/clusters',
request_field='cluster',
request_type_name='SddcProjectsLocationsClusterGroupsClustersCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a cluster.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters/{clustersId}',
http_method='DELETE',
method_id='sddc.projects.locations.clusterGroups.clusters.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsClustersDeleteRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single cluster.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Cluster) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters/{clustersId}',
http_method='GET',
method_id='sddc.projects.locations.clusterGroups.clusters.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsClustersGetRequest',
response_type_name='Cluster',
supports_download=False,
)
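# A minimal usage sketch (illustrative only; the client construction, the
# `messages` module alias, and the resource name below are assumptions and are
# not part of this generated file):
#
#   client = SddcV1alpha1()
#   request = messages.SddcProjectsLocationsClusterGroupsClustersGetRequest(
#       name='projects/p/locations/us-central1/clusterGroups/cg/clusters/c')
#   cluster = client.projects_locations_clusterGroups_clusters.Get(request)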
def List(self, request, global_params=None):
r"""Lists clusters in a given cluster group.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListClustersResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters',
http_method='GET',
method_id='sddc.projects.locations.clusterGroups.clusters.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/clusters',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsClustersListRequest',
response_type_name='ListClustersResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates labels of a specific cluster.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters/{clustersId}',
http_method='PATCH',
method_id='sddc.projects.locations.clusterGroups.clusters.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1alpha1/{+name}',
request_field='cluster',
request_type_name='SddcProjectsLocationsClusterGroupsClustersPatchRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def RemoveNodes(self, request, global_params=None):
r"""Remove bare metal nodes from a cluster.
Args:
request: (SddcProjectsLocationsClusterGroupsClustersRemoveNodesRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('RemoveNodes')
return self._RunMethod(
config, request, global_params=global_params)
RemoveNodes.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/clusters/{clustersId}:removeNodes',
http_method='POST',
method_id='sddc.projects.locations.clusterGroups.clusters.removeNodes',
ordered_params=['cluster'],
path_params=['cluster'],
query_params=[],
relative_path='v1alpha1/{+cluster}:removeNodes',
request_field='removeNodesRequest',
request_type_name='SddcProjectsLocationsClusterGroupsClustersRemoveNodesRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsLocationsClusterGroupsIpAddressesService(base_api.BaseApiService):
"""Service class for the projects_locations_clusterGroups_ipAddresses resource."""
_NAME = 'projects_locations_clusterGroups_ipAddresses'
def __init__(self, client):
super(SddcV1alpha1.ProjectsLocationsClusterGroupsIpAddressesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new `IpAddress` in a given `ClusterGroup`. The creation is asynchronous. You can check the returned operation to track its progress. When the operation successfully completes, the cluster is fully functional. The returned operation is automatically deleted after a few hours, so there is no need to call `DeleteOperation`.
Args:
request: (SddcProjectsLocationsClusterGroupsIpAddressesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/ipAddresses',
http_method='POST',
method_id='sddc.projects.locations.clusterGroups.ipAddresses.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['ipAddressId'],
relative_path='v1alpha1/{+parent}/ipAddresses',
request_field='ipAddress',
request_type_name='SddcProjectsLocationsClusterGroupsIpAddressesCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes an `IpAddress` in a given `ClusterGroup`.
Args:
request: (SddcProjectsLocationsClusterGroupsIpAddressesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/ipAddresses/{ipAddressesId}',
http_method='DELETE',
method_id='sddc.projects.locations.clusterGroups.ipAddresses.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsIpAddressesDeleteRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the details of a single `IpAddress`.
Args:
request: (SddcProjectsLocationsClusterGroupsIpAddressesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(IpAddress) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/ipAddresses/{ipAddressesId}',
http_method='GET',
method_id='sddc.projects.locations.clusterGroups.ipAddresses.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsIpAddressesGetRequest',
response_type_name='IpAddress',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists `IpAddress` objects in a given `ClusterGroup`.
Args:
request: (SddcProjectsLocationsClusterGroupsIpAddressesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListIpAddressesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}/ipAddresses',
http_method='GET',
method_id='sddc.projects.locations.clusterGroups.ipAddresses.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/ipAddresses',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsIpAddressesListRequest',
response_type_name='ListIpAddressesResponse',
supports_download=False,
)
class ProjectsLocationsClusterGroupsService(base_api.BaseApiService):
"""Service class for the projects_locations_clusterGroups resource."""
_NAME = 'projects_locations_clusterGroups'
def __init__(self, client):
super(SddcV1alpha1.ProjectsLocationsClusterGroupsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new `ClusterGroup` in a given project and location (region). The creation is asynchronous. You can check the returned operation to track its progress. When the operation successfully completes, the new `ClusterGroup` is fully functional. The returned operation is automatically deleted after a few hours, so there is no need to call `DeleteOperation`.
Args:
request: (SddcProjectsLocationsClusterGroupsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups',
http_method='POST',
method_id='sddc.projects.locations.clusterGroups.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['clusterGroupId'],
relative_path='v1alpha1/{+parent}/clusterGroups',
request_field='clusterGroup',
request_type_name='SddcProjectsLocationsClusterGroupsCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a `ClusterGroup`.
Args:
request: (SddcProjectsLocationsClusterGroupsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/clusterGroups/{clusterGroupsId}',
http_method='DELETE',
method_id='sddc.projects.locations.clusterGroups.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['requestId'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='SddcProjectsLocationsClusterGroupsDeleteRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def GenerateSupportBundle(self, request, global_params=None):
r"""Consumer API (private) to generate support bundles of VMware stack.
Args:
request: (SddcProjectsLocationsClusterGroupsGenerateSupportBundleRequest) input message
global_params: (StandardQueryParameters, default: | |
in the local history
Arguments:
* operation -- the operation source
* history_content -- the content needed to keep
"""
settings = context.get_settings()
if not settings["keep_operation_history"]: return
splits = operation.split("/")
if len(splits) == 1:
folder, operation = "", splits[0]
elif len(splits) == 2:
folder, operation = splits
outputdir = settings["workspace"] + "/.history/" + folder
if not os.path.exists(outputdir):
os.makedirs(outputdir)
time_stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
history = "%s\n```java\n%s\n```\n\n" % (time_stamp, history_content)
fp = open(outputdir + "/%s.md" % operation, "ab")
fp.write(history.encode("utf-8"))
fp.close()
def add_config_history(operation, content, settings, ext="json"):
"""Keep the history in the local history
Arguments:
* operation -- the operation source
* history_content -- the content needed to keep
"""
outputdir = os.path.join(settings["workspace"], ".config")
if not os.path.exists(outputdir):
os.makedirs(outputdir)
with open(outputdir + "/%s.%s" % (operation, ext), "w") as fp:
fp.write(json.dumps(content, indent=4))
# After writing the file locally, refresh the sidebar
sublime.set_timeout(lambda: sublime.active_window().run_command('refresh_folder_list'), 200)
sublime.set_timeout(lambda: sublime.active_window().run_command('refresh_folder_list'), 1300)
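# A minimal usage sketch of add_config_history (illustrative only; the "session"
# operation name and the payload below are hypothetical):
#
#   settings = context.get_settings()
#   add_config_history("session", {"instance_url": "https://example.my.salesforce.com"}, settings)
#
# This would write the JSON payload to <workspace>/.config/session.json and then
# trigger the two sidebar refreshes scheduled above.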
def export_report_api(rootdir):
reports = []
for parent,dirnames,filenames in os.walk(rootdir):
for filename in filenames:
if not filename.endswith(".report"): continue
report_dir = parent + "/" + filename
report_folder = os.path.split(parent)[1]
report_name = filename.split(".")[0]
report_api = getUniqueElementValueFromXmlString(open(report_dir, "rb").read(), "name")
# report_dict[report_api] = report_name
reports.append({"name": report_name, "api": report_api, "folder": report_folder})
list2csv(rootdir + "/test.csv", reports)
def check_action_enabled():
"""If project in current date is not created, new component is not enabled
Returns:
* * -- whether project in current date is exist
"""
# Check project workspace is available
settings = context.get_settings()
if not os.path.exists(settings["workspace"]): return False
# Check whether describe_metadata request is finished
described_metadata = get_described_metadata(settings)
return described_metadata is not None
def get_view_by_name(view_name):
"""Get view by view name
Arguments:
* view_name -- name of view in sublime
Returns:
* view -- sublime open tab
"""
view = None
for win in sublime.windows():
for v in win.views():
if v.name() == view_name:
view = v
return view
def get_view_by_file_name(file_name):
"""
Get the view in the active window whose file name contains the given name
Arguments:
* file_name: full or partial file name of the view
Returns:
* return: view
"""
view = None
for v in sublime.active_window().views():
if not v.file_name(): continue
if file_name in v.file_name():
view = v
return view
def get_view_by_id(view_id):
"""
Get the view in the active window by the view_id
* view_id: id of view
* return: view
"""
view = None
for v in sublime.active_window().views():
if not v.id(): continue
if v.id() == view_id:
view = v
return view
def get_child_types(parent_type):
""" Get child types by parent type
Parameter:
* parent_type -- Parent Metadata Object
Return:
* child_types -- Child Metadata Objects of parent
"""
settings = context.get_settings()
child_types = settings[parent_type].get("childXmlNames", [])
if isinstance(child_types, str):
child_types = [child_types]
return child_types
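# Illustrative sketch (the settings entry below is hypothetical): if
# settings["CustomObject"] were {"xmlName": "CustomObject",
# "childXmlNames": ["CustomField", "ValidationRule"]}, then
# get_child_types("CustomObject") would return ["CustomField", "ValidationRule"];
# a single string value for childXmlNames is normalized to a one-element list.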
def parse_package_types(_types):
""" Build structure
From: {
"CustomObject": ["A__c", "B__c"],
"CustomField": ["A__c.A__c", "A__c.A1__c", "B__c.B__c"],
"ValidationRule": ["A__c.VR1", "B__c.BR2"]
"ApexClass": ["AClass", "BClass", "CClass"]
}
To: {
"CustomObject": {
"A__c": {
"CustomField": ["A.A__c", "A.B__c"],
"ValidationRule": ["A.VR1"]
},
"B__c": {
"CustomField": ["B__c.B__c"],
"ValidationRule": ["B__c.BR2"]
}
},
"ApexClass": ["A", "B", "C"]
}
"""
settings = context.get_settings()
package_types = {}
for _type, elements in _types.items():
attr = settings[_type]
_child_types = attr.get("childXmlNames", [])
# If _type is a child type (for example CustomField or ListView), skip it
if _type != attr["xmlName"]:
continue
# If no child XML
if not _child_types:
# If no elements, don't keep it
if not elements:
continue
# inFolder is false
if attr["inFolder"] == "false":
package_types[_type] = elements
else:
# Build structure as {folder: [elements]}
folder_elements = {}
for folder in [e for e in elements if "/" not in e]:
folder_elements[folder] = [
e for e in elements if e.startswith(folder) \
and "/" in e
]
package_types[_type] = folder_elements
continue
if isinstance(_child_types, str):
_child_types = [_child_types]
child_cache = {}
for _child_type in _child_types:
if _child_type not in _types:
continue
parent_to_children = {}
for parent in elements:
children = []
for _child_element in _types[_child_type]:
if _child_element.startswith(parent):
children.append(_child_element)
if children:
parent_to_children[parent] = children
if parent_to_children:
child_cache[_child_type] = parent_to_children
package_types[_type] = child_cache
# view = sublime.active_window().new_file()
# view.run_command("new_view", {
# "name": "test",
# "input": json.dumps(package_types)
# })
return package_types
def build_package_types(package_xml_content):
result = xmltodict.parse(package_xml_content)
metadata_types = result["Package"]["types"]
# If there is only one types in package
if isinstance(metadata_types, dict):
metadata_types = [metadata_types]
types = {}
for t in metadata_types:
name = t["name"]
members = t["members"]
if isinstance(members, str):
types[name] = [members]
elif isinstance(members, list):
types[name] = members
return types
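# Illustrative sketch (hypothetical package.xml content): parsing
#
#   <Package xmlns="http://soap.sforce.com/2006/04/metadata">
#       <types><members>AClass</members><name>ApexClass</name></types>
#       <version>32.0</version>
#   </Package>
#
# with build_package_types would return {"ApexClass": ["AClass"]}; a single
# <members> entry is normalized to a one-element list.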
def build_folder_types(dirs):
""" Build folders_dict for folder refreshing
{
"ApexClass": ["*"],
"ApexTrigger": ["*"],
"CustomObject": ["*"]
}
"""
settings = context.get_settings()
dname = settings["default_project_name"]
types = {}
for _dir in dirs:
base, folder = os.path.split(_dir)
if folder not in settings: continue
if dname not in _dir: continue
xml_name = settings[folder]["xmlName"]
types[xml_name] = ["*"]
return types
def build_package_dict(files, ignore_folder=True):
""" Build Package Dict as follow structure by files
{
'ApexClass': [{
'dir': <file path>,
'folder': 'classes',
'name': 'AccountController',
'metadata_name': 'AccountController',
'extension': '.cls'
}],
'ApexComponent': [{
'dir': <file path>,
'folder': 'components',
'name': 'SiteFooter',
'metadata_name': 'SiteFooter',
'extension': '.component'
}]
}
"""
settings = context.get_settings()
package_dict = {}
for f in files:
# Ignore folder
if ignore_folder and not os.path.isfile(f):
continue
# Ignore "-meta.xml"
if f.endswith("-meta.xml"):
continue
# Parse the file path into its metadata attributes
attributes = get_file_attributes(f)
metadata_folder = attributes["metadata_folder"]
mo = settings[metadata_folder]
metadata_object = mo["xmlName"]
file_dict = {
"name": attributes["name"],
"metadata_name": attributes["name"],
"dir": f,
"folder": attributes["folder"] if "folder" in attributes else "",
"metadata_folder": attributes["metadata_folder"],
"extension": attributes["extension"]
}
if mo["inFolder"] == "true":
file_dict["metadata_name"] = "%s/%s" % (
attributes["folder"], attributes["name"]
)
if metadata_folder == "aura":
file_dict["metadata_name"] = "%s" % attributes["folder"]
# Build dict
if metadata_object in package_dict:
package_dict[metadata_object].append(file_dict)
else:
package_dict[metadata_object] = [file_dict]
return package_dict
def build_package_xml(settings, package_dict):
""" Build Package XML as follow structure
<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
<types>
<met:members>*</met:members>
<met:members>Account</met:members>
<name>CustomObject</name>
</types>
<version>32.0</version>
</Package>
"""
# Build types for package.xml
types = []
for meta_type, values in package_dict.items():
if values and "metadata_name" in values[0]:
members = ["<members>%s</members>" % v["metadata_name"] for v in values]
else:
members = ["<members>%s</members>" % v for v in values]
types.append("""
<types>
%s
<name>%s</name>
</types>
""" % (" ".join(members), meta_type))
# Build package.xml
package_xml_content = """<?xml version="1.0" encoding="UTF-8"?>
<Package xmlns="http://soap.sforce.com/2006/04/metadata">
%s
<version>%s.0</version>
</Package>
""" % (" ".join(types), settings["api_version"])
return package_xml_content
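# Illustrative sketch (hypothetical inputs): with settings["api_version"] == 32
# and package_dict == {"ApexClass": ["AccountController"]}, build_package_xml
# would produce roughly:
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <Package xmlns="http://soap.sforce.com/2006/04/metadata">
#       <types>
#           <members>AccountController</members>
#           <name>ApexClass</name>
#       </types>
#       <version>32.0</version>
#   </Package>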
def build_destructive_package_by_files(files, ignore_folder=True):
settings = context.get_settings()
workspace = settings["workspace"]
if not os.path.exists(workspace):
os.makedirs(workspace)
# Construct package dict
package_dict = build_package_dict(files, ignore_folder)
# Build destructiveChanges.xml
destructive_xml_content = build_package_xml(settings, package_dict)
destructive_xml_path = workspace+"/destructiveChanges.xml"
with open(destructive_xml_path, "wb") as fp:
fp.write(destructive_xml_content.encode("utf-8"))
# Build package.xml
package_xml_content = build_package_xml(settings, {})
package_xml_path = workspace+"/package.xml"
with open(package_xml_path, "wb") as fp:
fp.write(package_xml_content.encode("utf-8"))
# Create temp zipFile
zipfile_path = workspace + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Compress destructive_xml and package_xml into temp zipFile
# After that, close the input stream
zf.write(package_xml_path, "package.xml")
zf.write(destructive_xml_path, "destructiveChanges.xml")
zf.close()
# Remove temp files
os.remove(package_xml_path)
os.remove(destructive_xml_path)
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
# Remove temporary `test.zip`
os.remove(zipfile_path)
return base64_package
def build_destructive_package_by_package_xml(types):
""" Build destructive package,
Arguments:
* types -- see below json:
{
"ApexClass": ["AClass", "BClass"],
"ApexTrigger": ["ATrigger", "BTrigger"],
...
}
Return:
* base64_encode -- base64 encode zip file,
which contains destructiveChanges.xml and package.xml
"""
settings = context.get_settings()
workspace = settings["workspace"]
# Build destructiveChanges.xml
destructive_xml_content = build_package_xml(settings, types)
destructive_xml_path = workspace+"/destructiveChanges.xml"
with open(destructive_xml_path, "wb") as fp:
fp.write(destructive_xml_content.encode("utf-8"))
# Build package.xml
package_xml_content = build_package_xml(settings, {})
package_xml_path = workspace+"/package.xml"
with open(package_xml_path, "wb") as fp:
fp.write(package_xml_content.encode("utf-8"))
# Create temp zipFile
zipfile_path = workspace + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Compress destructive_xml and package_xml into temp zipFile
# After that, close the input stream
zf.write(package_xml_path, "package.xml")
zf.write(destructive_xml_path, "destructiveChanges.xml")
zf.close()
# Remove temp files
os.remove(package_xml_path)
os.remove(destructive_xml_path)
# base64 encode zip package
base64_package = base64_encode(zipfile_path)
# Remove temporary `test.zip`
os.remove(zipfile_path)
return base64_package
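# A minimal usage sketch (illustrative; the member names below are hypothetical):
#
#   types = {"ApexClass": ["AClass"], "ApexTrigger": ["ATrigger"]}
#   base64_package = build_destructive_package_by_package_xml(types)
#
# The returned string is the base64-encoded zip holding package.xml and
# destructiveChanges.xml, ready to be sent with a Metadata API deploy request.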
def build_deploy_package(files):
# Initiate zipfile
settings = context.get_settings()
if not os.path.exists(settings["workspace"]):
os.makedirs(settings["workspace"])
zipfile_path = settings["workspace"] + "/test.zip"
zf = zipfile.ZipFile(zipfile_path, "w", zipfile.ZIP_DEFLATED)
# Get package dict
package_dict = build_package_dict(files)
# Add files to zip
for meta_type in package_dict:
for f in package_dict[meta_type]:
# Define write_to
write_to = (
f["metadata_folder"],
("/" + f["folder"]) if f["folder"] else "",
f["name"],
f["extension"]
)
# If lightning component, add
isinstance(op, StridedSliceOp):
# StridedSliceOps can have mask attributes
get_slice_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, SplitOp):
get_split_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, (Conv2DOp, Conv2DGradFilterOp,
Conv2DGradInputOp)):
get_conv_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, (FusedBatchNormOp, FusedBatchNormGradOp)):
get_batch_norm_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, PoolBaseOp):
get_pool_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, (PackOp, UnpackOp)):
get_axis_attribute_from_op(tf_sess, tf_op, op)
elif isinstance(op, BatchMatMulOp):
get_batch_matmul_attributes_from_op(tf_sess, tf_op, op)
elif isinstance(op, EnterOp):
get_enter_frame_name_from_op(tf_sess, tf_op, op)
elif isinstance(op, SqueezeOp):
get_squeeze_dims_from_op(tf_sess, tf_op, op)
elif isinstance(op, ReduceOp):
get_keep_dims_from_op(tf_sess, tf_op, op)
# print(tf_op.op_def)
# print(tf_op.node_def)
def construct_catamount_graph(tf_sess, tf_graph):
graph = Graph()
tensors = {}
op_inputs = {}
ctrl_frames = {}
all_stack_ops = []
for tf_op in tf_graph._nodes_by_name.values():
if tf_op.type in TF_OP_TO_CATAMOUNT.keys():
# Map to Catamount op type
catamount_type = TF_OP_TO_CATAMOUNT[tf_op.type]
else:
print('WARN: Unknown op type: {} (op: {})'
.format(tf_op.type, tf_op.name))
catamount_type = UnknownOp
# Create the Catamount internal op
op = catamount_type(tf_op.name)
if catamount_type == ReduceOp:
reduce_op = None
if tf_op.type in TF_OP_TO_CATAMOUNT_REDUCE:
reduce_op = TF_OP_TO_CATAMOUNT_REDUCE[tf_op.type]
else:
print('WARN: Reduce may set reduction op: {}'.format(tf_op.type))
if reduce_op is not None:
op.setReductionOp(reduce_op)
if tf_op.type == 'BiasAddGrad':
op.setAxes(0)
if catamount_type == ScatterUpdateOp:
print('WARN: ScatterUpdate may set update op: {}'.format(tf_op.type))
# Create the output tensors for this op
for i in range(len(tf_op.outputs)):
tf_tensor = tf_op.outputs[i]
tf_dtype = tf_tensor.dtype.base_dtype
if tf_dtype in TF_DTYPE_TO_CATAMOUNT.keys():
catamount_dtype = TF_DTYPE_TO_CATAMOUNT[tf_dtype]
else:
print('WARN: Unknown dtype {} for tensor {}'
.format(tf_tensor.dtype, tf_tensor))
catamount_dtype = None
out_tens = Tensor(tf_tensor.name,
tf_shape_to_catamount(tf_tensor.shape), catamount_dtype)
tensors[out_tens.name] = out_tens
op.addOutput(out_tens)
# Track the input tensor names to connect them in next phase
op_inputs[op.name] = []
if tf_op.type == 'Split':
# TF Split op has different interface than Catamount. Need to add the
# size_splits tensor to match the Catamount interface (input[1])
assert len(tf_op.inputs) == 2
op_inputs[op.name].append(tf_op.inputs[1].name)
# Signal to Catamount to use the num_split attribute by setting
# size_splits equal to a scalar constant of value 0
size_splits = catamount.constant('{}_size_splits'.format(op.name),
out_shape=[], value=0, graph=graph)
tensors[size_splits.name] = size_splits
op_inputs[op.name].append(size_splits.name)
op_inputs[op.name].append(tf_op.inputs[0].name)
else:
for i in range(len(tf_op.inputs)):
op_inputs[op.name].append(tf_op.inputs[i].name)
# Get the tf_op's attributes and set them as necessary
parse_tf_op_attributes_into_op(tf_sess, tf_op, op)
if isinstance(op, EnterOp):
frame_name = op.getFrameName()
if frame_name not in ctrl_frames:
ctrl_frames[frame_name] = ContextFrame(frame_name)
ctrl_frames[frame_name].addEnterOp(op)
elif isinstance(op, StackOp):
all_stack_ops.append(op)
graph.addOp(op)
# Hook up all the op inputs to the ops that generate them
for op_name in op_inputs.keys():
op = graph.opsByName[op_name]
for in_tensor in op_inputs[op_name]:
assert in_tensor in tensors.keys(), \
'Unknown input tensor {}'.format(in_tensor)
graph.addInputToOp(op, tensors[in_tensor])
# Propagate stack pointers for StackOps. These ops always occur as a
# series of ops. The StackOp is first, and propagates its outputs to
# (optionally) EnterOps, and then to StackPush and StackPop ops. The
# StackPush and StackPop ops need to get the pointer for the stack
# created for the StackOp
for stack_op in all_stack_ops:
# Traverse out tensor to find all StackPush and StackPop
out_tensor = stack_op.outputs[0]
for cons_op in out_tensor.consumers.values():
while not isinstance(cons_op, BaseStackOp):
assert(isinstance(cons_op, (EnterOp, SwitchOp)))
assert(len(cons_op.outputs[0].consumers) == 1)
cons_ops = list(cons_op.outputs[0].consumers.values())
cons_op = cons_ops[0]
cons_op.setStack(stack_op.getStack())
assert(cons_op.getStack() is not None)
# Remove TF variable initialization (Assign) ops
# These are not necessary to fully specify the graph
assign_ops = set()
for op in graph.opsByName.values():
if isinstance(op, AssignOp):
assign_ops.add(op)
op_types = set()
for assign_op in assign_ops:
assert isinstance(assign_op.inputs[0].producer, VariableOp)
# assert isinstance(assign_op.inputs[1].producer, ConstantOp)
my_ancestors = set()
my_frontier = set()
my_frontier.add(assign_op)
while len(my_frontier) > 0:
next_op = my_frontier.pop()
for in_tensor in next_op.inputs:
if not isinstance(in_tensor.producer, VariableOp):
my_frontier.add(in_tensor.producer)
my_ancestors.add(next_op)
if len(my_ancestors) > 100:
break
if len(my_ancestors) <= 8:
op_types.update(set(type(op) for op in my_ancestors))
for next_op in my_ancestors:
graph.removeOp(next_op)
else:
print('WARN: Unable to remove: {}'.format(assign_op.debugString()))
print(' COUNT: {}'.format(len(my_ancestors)))
assert graph.isValid()
# Remove any Tensorflow model saver ops from the graph. These ops
# always occur as a series of 6 ops:
# 1) Three ConstOps that define (A) the name of the model, (B) the
# names of saved tensors, and (C) the sizes/shapes of saved tensors.
# 2) Save and Restore ops, which takes the above inputs 0-2
# 3) An AssignOp, which takes the above reference, and if appropriate,
# loads tensor data from the checkpoint and assigns the ref to it.
# 4) A control dependency op (IdentityOp) that takes the model name
# op as input and has no outputs
ops_to_remove = set()
saver_ops = []
model_name_ops = set()
for op in graph.opsByName.values():
if isinstance(op, (TFRestoreOp, TFSaveOp)):
saver_ops.append(op)
ops_to_remove.add(op)
if op.inputs[0].producer not in model_name_ops:
model_name_ops.add(op.inputs[0].producer)
for saver_op in saver_ops:
if isinstance(saver_op, TFRestoreOp):
assert len(saver_op.inputs) == 3
else:
assert isinstance(saver_op, TFSaveOp)
# First 3 inputs are config inputs
assert len(saver_op.inputs) >= 3
assert len(saver_op.outputs) == 0
# Get input ops and trace back to through consts and idents
parent_ops_to_trace_and_remove = []
for idx in range(3):
in_tensor = saver_op.inputs[idx]
input_op = in_tensor.producer
ops_to_remove.add(input_op)
if len(input_op.inputs) > 0:
assert isinstance(input_op, (IdentityOp, UnknownOp)), \
'Not ident or unk: {}'.format(input_op.debugString())
parent_ops_to_trace_and_remove.append(input_op)
else:
assert isinstance(input_op, (ConstantOp, IdentityOp)), \
'Not const: {}'.format(input_op.debugString())
while len(parent_ops_to_trace_and_remove) > 0:
parent_op = parent_ops_to_trace_and_remove.pop(0)
for in_tensor in parent_op.inputs:
input_op = in_tensor.producer
ops_to_remove.add(input_op)
if len(input_op.inputs) > 0:
assert isinstance(input_op, (IdentityOp, UnknownOp)), \
'Not ident or unk: {}'.format(input_op.debugString())
parent_ops_to_trace_and_remove.append(input_op)
else:
assert isinstance(input_op, ConstantOp), \
'Not const: {}'.format(input_op.debugString())
if isinstance(saver_op, TFRestoreOp):
assert len(saver_op.outputs) >= 1
# Restore ops can package all tensors together into a single
# op, so need to traverse all outputs to their assign ops
child_ops_to_trace_and_remove = []
for out_tensor in saver_op.outputs:
assert len(out_tensor.consumers) == 1
output_op = list(out_tensor.consumers.values())[0]
ops_to_remove.add(output_op)
if isinstance(output_op, AssignOp):
assert len(output_op.outputs) == 1
assert len(output_op.outputs[0].consumers) == 0
else:
assert isinstance(output_op, IdentityOp)
child_ops_to_trace_and_remove.append(output_op)
while len(child_ops_to_trace_and_remove) > 0:
child_op = child_ops_to_trace_and_remove.pop(0)
ops_to_remove.add(child_op)
for child_tens in child_op.outputs:
for next_op in child_tens.consumers.values():
assert isinstance(next_op, (AssignOp, UnknownOp))
child_ops_to_trace_and_remove.append(next_op)
model_name_consumers = []
for model_name_op in model_name_ops:
assert len(model_name_op.outputs) == 1
model_name_consumers.extend(
model_name_op.outputs[0].consumers.values())
for saver_op in model_name_consumers:
if saver_op not in ops_to_remove:
# Only other op to catch is the control dependency op, which
# is an IdentityOp and has no outputs
if isinstance(saver_op, IdentityOp):
assert len(saver_op.outputs) == 1
assert len(saver_op.outputs[0].consumers) == 0
else:
print('WARN: Unknown model_name_consumer: {}'
.format(saver_op.debugString()))
ops_to_remove.add(saver_op)
for op in ops_to_remove:
graph.removeOp(op)
assert graph.isValid()
# Traverse the graph to find subgraph ops, such as loops
# NOTES:
# 1) TF while loops are controlled by a LoopConditionOp, which gates
# all the SwitchOps that allow a loop iteration to proceed. The
# inputs to a LoopConditionOp can be part of the condition function
# passed to tf.while_loop. However, the condition function cannot
# create side-effects (which is an important observation for
# identifying the condition subgraph).
# 2) The condition subgraph is defined as all inputs to the while loop
# that are not updated during the loop body and outputs of MergeOps
# that are used to evaluate the loop condition function.
# 3) Loops create a loop-iteration versioning context for each variable
# that is explicitly input into the while condition or body
# functions (but NOT variables/tensors that are accessed locally or
# globally for evaluating the condition).
# 4) The body of the loop is all ops that occur between any IdentityOp
# and any NextIterationOp from the variable contexts for the loop.
# Final) Note that TF while loops can have nested while loops or other
# control flow blocks, so we need to design this recursively.
control_ops = []
# Find the ops that will require subgraph designations (i.e., control)
for op_name, op in graph.opsByName.items():
if op.isControlOp():
control_ops.append(op)
assert len(control_ops) == len(ctrl_frames)
for ctrl_op in control_ops:
# Get all ops for the loop condition value calculation (1 and 2),
# the variable contexts (3), and the loop body (4). Extract these
# into a subgraph.
ctrl_block_frame = None
subgraph_ops = [ctrl_op]
visited_ops = set(subgraph_ops)
enter_ops = set()
exit_ops = set()
frontier_ops = []
for out_tensor in ctrl_op.outputs:
for consumer in out_tensor.consumers.values():
assert isinstance(consumer, SwitchOp)
frontier_ops.extend(out_tensor.consumers.values())
# A) Traverse backward from SwitchOps to MergeOps and EnterOps,
# and NextIterationOps. Stop at the LoopConditionOp, and any
# NextIterationOps and EnterOps. Add MergeOps to the frontier
# to traverse forward | |
<reponame>evereux/catia_python
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.setting_controller import SettingController
class ToleranceSheetSettingAtt(SettingController):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| System.SettingController
| ToleranceSheetSettingAtt
|
| The interface to access a CATIAToleranceSheetSettingAtt.
| This interface may be used to read or modify in the CATIA/Tools/Option the
| settings values of Tolerance sheet.
"""
def __init__(self, com_object):
super().__init__(com_object)
self.tolerance_sheet_setting_att = com_object
@property
def angle_max_tolerance(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property AngleMaxTolerance() As double
|
| Returns or sets the AngleMaxTolerance parameter.
| Role:Return or Set the AngleMaxTolerance parameter if it is possible in the
| current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oAngleMaxTolerance
| The angle maximum tolerance value.
:return: float
:rtype: float
"""
return self.tolerance_sheet_setting_att.AngleMaxTolerance
@angle_max_tolerance.setter
def angle_max_tolerance(self, value: float):
"""
:param float value:
"""
self.tolerance_sheet_setting_att.AngleMaxTolerance = value
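# A minimal usage sketch (illustrative; `com_object` stands for an already
# retrieved CATIAToleranceSheetSettingAtt COM object, which is not constructed
# here):
#
#   tol_att = ToleranceSheetSettingAtt(com_object)
#   current = tol_att.angle_max_tolerance
#   tol_att.angle_max_tolerance = 5.0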
@property
def angle_min_tolerance(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property AngleMinTolerance() As double
|
| Returns or sets the AngleMinTolerance parameter.
| Role:Return or Set the AngleMinTolerance parameter if it is possible in the
| current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oAngleMinTolerance
| The angle minimum tolerance value.
:return: float
:rtype: float
"""
return self.tolerance_sheet_setting_att.AngleMinTolerance
@angle_min_tolerance.setter
def angle_min_tolerance(self, value: float):
"""
:param float value:
"""
self.tolerance_sheet_setting_att.AngleMinTolerance = value
@property
def default_tolerance(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property DefaultTolerance() As short
|
| Returns or sets the DefaultTolerance parameter.
| Role:Return or Set the DefaultTolerance parameter if it is possible in the
| current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oDefaultTolerance
| Legal values:
| 0 : to not accept a default tolerance
| 1 : to accept a default tolerance.
:return: int
:rtype: int
"""
return self.tolerance_sheet_setting_att.DefaultTolerance
@default_tolerance.setter
def default_tolerance(self, value: int):
"""
:param int value:
"""
self.tolerance_sheet_setting_att.DefaultTolerance = value
@property
def length_max_tolerance(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property LengthMaxTolerance() As double
|
| Returns or sets the LengthMaxTolerance parameter.
| Role:Return or Set the LengthMaxTolerance parameter if it is possible in
| the current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oLengthMaxTolerance
| The length maximum tolerance value.
:return: float
:rtype: float
"""
return self.tolerance_sheet_setting_att.LengthMaxTolerance
@length_max_tolerance.setter
def length_max_tolerance(self, value: float):
"""
:param float value:
"""
self.tolerance_sheet_setting_att.LengthMaxTolerance = value
@property
def length_min_tolerance(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property LengthMinTolerance() As double
|
| Returns or sets the LengthMinTolerance parameter.
| Role:Return or Set the LengthMinTolerance parameter if it is possible in
| the current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oLengthMinTolerance
| The length minimum tolerance value.
:return: float
:rtype: float
"""
return self.tolerance_sheet_setting_att.LengthMinTolerance
@length_min_tolerance.setter
def length_min_tolerance(self, value: float):
"""
:param float value:
"""
self.tolerance_sheet_setting_att.LengthMinTolerance = value
def get_angle_max_tolerance_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetAngleMaxToleranceInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the AngleMaxTolerance
| parameter.
| Role:Retrieves the state of the AngleMaxTolerance parameter in the current
| environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.tolerance_sheet_setting_att.GetAngleMaxToleranceInfo(io_admin_level, io_locked)
def get_angle_min_tolerance_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetAngleMinToleranceInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the AngleMinTolerance
| parameter.
| Role:Retrieves the state of the AngleMinTolerance parameter in the current
| environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.tolerance_sheet_setting_att.GetAngleMinToleranceInfo(io_admin_level, io_locked)
def get_default_tolerance_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetDefaultToleranceInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the DefaultTolerance
| parameter.
| Role:Retrieves the state of the DefaultTolerance parameter in the current
| environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.tolerance_sheet_setting_att.GetDefaultToleranceInfo(io_admin_level, io_locked)
def get_length_max_tolerance_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetLengthMaxToleranceInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the LengthMaxTolerance
| parameter.
| Role:Retrieves the state of the LengthMaxTolerance parameter in the current
| environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.tolerance_sheet_setting_att.GetLengthMaxToleranceInfo(io_admin_level, io_locked)
def get_length_min_tolerance_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetLengthMinToleranceInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the LengthMinTolerance
| parameter.
| Role:Retrieves the state of the LengthMinTolerance parameter in the current
| environment.
|
| Parameters:
|
| | |
new_df.iloc[:, :split_id].copy(deep=True)
to_split = df[[col_name]].copy(deep=True)
to_join = df.iloc[:, (split_id+1):].copy(deep=True)
cols_to_add = np.hstack((org_col_index[(split_id+1):], ''))
to_join[''] = pd.Series([np.NaN]*to_join.shape[0])
#split the column:
to_split = to_split[col_name].str.split(self.regex_string, n=1, expand=True)
to_split.columns = [col_name, cols_to_add[0]]
# join the split column back first
#to_keep = pd.concat([to_keep, to_split], axis=1)
to_keep = to_keep.join(to_split)
na_boolean = to_keep[cols_to_add[0]].isna()
to_keep = to_keep.combine_first(to_join[[cols_to_add[0]]])
#breakpoint()
for i in np.arange(1,len(cols_to_add)):
#print(i)
new_col = to_join.iloc[:, i].copy(deep=True)
new_col[~na_boolean] = to_join.iloc[:, i-1][~na_boolean]
to_keep = pd.concat([to_keep, new_col], axis=1)
#print(cols_to_add[i])
#print(cols_to_add)
#return to_keep[np.hstack((org_col_index, ''))]
# Create the column map: each original column maps to itself, and the last
# original column maps to the two columns produced by the split
cmap = {}
for ii in np.arange(df.shape[1]):
cmap[ii] = [ii]
cmap[df.shape[1]-1] = [df.shape[1]-1, df.shape[1]]
# print(cmap)
end = time()
self.update_history(f"Column id {col_name} split into two.", end-start)
self.update_history(message = f"New dataframe has {to_keep.shape[1]} columns now.")
return to_keep[np.hstack((org_col_index, ''))],{},cmap
class ColumnJoiner(Stainer):
""" Stainer to join text columns, creating a ragged DataFrame """
def __init__(self, name = "Column splitter", row_idx =[], col_idx = []):
""" Constructor for ColumnJoiner
Parameters
----------
name: str, optional.
Name of stainer.
col_idx: int list, required. This has to contain two consecutive integers. These are
the columns that will be concatenated.
row_idx: int list, required. These will be the rows that will be "shifted" inwards.
Raises
------
ValueError
If col_idx has length not equals to two, or the values are not consecutive.
ValueError
If row_idx is empty.
"""
if ((len(col_idx) != 2) or (col_idx[1] - col_idx[0] != 1)):
raise ValueError("col_idx must contain two consecutive integers.")
if(len(row_idx) == 0):
raise ValueError("row_idx should not be empty.")
super().__init__(name, row_idx, col_idx)
def transform(self, df, rng, row_idx=None, col_idx=None):
"""Applies staining on the given indices in the provided dataframe.
Parameters
----------
df : pd.DataFrame
Dataframe to be transformed.
rng : np.random.BitGenerator
PCG64 pseudo-random number generator. Unused by this stainer.
row_idx : int list, optional
Unused parameter.
col_idx : int list, optional
Unused parameter.
Returns
-------
new_df : pd.DataFrame
Modified dataframe, with some columns shifted "inwards"
row_map : empty dictionary.
col_map : empty dictionary.
>>> rng = np.random.default_rng(12)
>>> x = pd.DataFrame({'label': ['T-LIGHT', 'ASHTRAY'], 'dept':['A1', 'A2'], 'price': [2.30, 3.20]})
>>> print(x)
label dept price
0 T-LIGHT A1 2.3
1 ASHTRAY A2 3.2
>>> cj1 = ColumnJoiner('test joiner', [1], [0,1])
>>> print(cj1.transform(x, rng))
label dept price
0 T-LIGHT A1 2.3
1 ASHTRAYA2 3.2 NaN
"""
new_df, row_idx, col_idx = self._init_transform(df, row_idx, col_idx)
start = time()
org_col_index = new_df.columns
col_names = org_col_index[col_idx]
split_id = np.argwhere(org_col_index == col_names[1]).reshape(1)[0]
to_keep = new_df.loc[:, :col_names[0]].copy(deep=True)
join_series = new_df[col_names[1]].copy(deep=True)
to_join = new_df.iloc[:, split_id:].copy(deep=True)
# breakpoint()
# modify the column to join, and paste with to_keep
join_series[~join_series.index.isin(row_idx)] = ''
to_keep[col_names[0]] = to_keep[col_names[0]] + join_series
for i in np.arange(split_id, new_df.shape[1]):
# print(i)
new_col = new_df.iloc[:, i].copy(deep=True)
if i < (new_df.shape[1] - 1):
new_col[row_idx] = new_df.iloc[:, i+1][row_idx]
else:
new_col[row_idx] = np.NaN
to_keep = pd.concat([to_keep, new_col], axis=1)
#breakpoint()
end = time()
self.update_history(f"Column id {col_names[0]} and {col_names[1]} joined at certain rows.", end-start)
return to_keep,{},{}
class ResidualResampler(Stainer):
""" Stainer to resample residuals from a linear model and return new y's. """
def __init__(self, name = "Residual resampler", row_idx =[], col_idx = []):
""" Constructor for ResidualResampler
Parameters
----------
name: str, optional.
Name of stainer.
col_idx: int list, required. This should specify at least two columns. The first will
be used as the y-variable, and the others will be used as the X matrix.
row_idx: int list, unused.
Raises
------
ValueError
If col_idx contains fewer than two integers.
"""
if len(col_idx) < 2:
raise ValueError("col_idx must contain at least two integers.")
super().__init__(name, row_idx, col_idx)
def transform(self, df, rng, row_idx=None, col_idx=None):
"""Applies staining on the given indices in the provided dataframe.
A ordinary least squares linear model is fit, using statsmodels. The residuals are then
sampled from using rand_from_Finv (from ddf.samplers) and added back to the fitted y-hats
to create a new set of y-values.
This stainer should result in a similar fit, but slightly different diagnostics/statistics.
The user should check if the new y-values are valid or not (e.g. are they negative when they
shouldn't be?)
Parameters
----------
df : pd.DataFrame
Dataframe to be transformed.
rng : np.random.BitGenerator
PCG64 pseudo-random number generator.
row_idx : int list, optional
Unused parameter.
col_idx : int list, optional
Unused parameter.
Returns
-------
new_df : pd.DataFrame
Modified dataframe, with new y-values sampled from the residual distribution
row_map : empty dictionary.
col_map : empty dictionary.
>>> rng = np.random.default_rng(12)
>>> x = np.arange(1, 11)
>>> y = x*2.3 + 3 + rng.normal(scale=1.6, size=10)
>>> org_df = pd.DataFrame({'x':x, 'y':y})
>>> rr = ResidualResampler('test rr', [], [1,0])
>>> new_df = rr.transform(org_df, rng)
>>> print(pd.concat((org_df, new_df), axis=1))
x y x y
0 1 5.289077 1 4.155549
1 2 9.273829 2 8.747416
2 3 11.086541 3 8.444612
3 4 13.358330 4 11.250526
4 5 17.090042 5 14.005535
5 6 14.871107 6 16.610120
6 7 18.096871 7 18.940797
7 8 19.286939 8 23.466323
8 9 23.527596 9 21.974412
9 10 27.598022 10 23.545538
"""
new_df, row_idx, col_idx = self._init_transform(df, row_idx, col_idx)
start = time()
# drop missing values
col_names = new_df.columns[col_idx]
fit_df = new_df.iloc[:, col_idx].dropna()
X = add_constant(fit_df.iloc[:, 1:])
y = fit_df.iloc[:,0]
# fit and predict from the model
m = OLS(y, X)
o = m.fit()
new_resid = rand_from_Finv(o.resid, rng, size=len(o.resid))
new_y = o.predict(X) + new_resid
# only put new values where we could predict values; otherwise keep old y-values.
new_df.loc[y.index, col_names[0]] = new_y
end = time()
self.update_history(f"New y-values in column {col_idx[0]} by sampling from residual distribution.",
end-start)
return new_df,{},{}
class InsertOutliers(Stainer):
""" Stainer to insert outliers at influential points using a linear model. """
def __init__(self, name = "Inserts outliers", row_idx =[], col_idx = [], n=5):
""" Constructor for InsertOutliers
Parameters
----------
name: str, optional.
Name of stainer.
col_idx: int list, required. This should specify at least two columns. The first will
be used as the y-variable, and the others will be used as the X matrix.
row_idx: int list, unused.
n: number of outliers to insert. The default is 5.
Raises
------
ValueError
If col_idx contains fewer than two integers.
"""
if len(col_idx) < 2:
raise ValueError("col_idx must contain at least two integers.")
super().__init__(name, row_idx, col_idx)
self.n = n
def transform(self, df, rng, row_idx=None, col_idx=None):
"""Applies staining on the given indices in the provided dataframe.
A ordinary least squares linear model is fit, using statsmodels.
The `n` most influential points are identified (using their leverage).
The residuals for these points are replaced by sampling from the 5% tails of the residual
distribution.
This stainer should result in a similar fit, but slightly different diagnostics/statistics.
The user should check if the new y-values are valid or not (e.g. are they negative when they
shouldn't be?)
Parameters
----------
df : pd.DataFrame
Dataframe to be transformed.
rng : np.random.BitGenerator
PCG64 pseudo-random number generator.
row_idx : int list, optional
Unused parameter.
col_idx : int list, optional
Unused parameter.
Returns
-------
new_df : pd.DataFrame
Modified dataframe, with outliers inserted at the most influential points
row_map : empty dictionary.
col_map : empty dictionary.
>>> rng = np.random.default_rng(12)
>>> x = np.arange(1, 11)
>>> x[-2:] = [15,16]
>>> y = x*2 + 3 + rng.normal(scale=5, size=10)
>>> org_df = pd.DataFrame({'x':x, 'y':y})
>>> rr = InsertOutliers('test outliers', [], [1,0], n=2)
>>> new_df = rr.transform(org_df, rng)[0]
>>> print(pd.concat((org_df, new_df), axis=1))
x y x y
0 1 4.965866 1 4.965866
1 2 12.230716 2 12.230716
2 3 12.707942 3 12.707942
3 4 14.619783 4 14.619783
4 5 21.093881 5 21.093881
5 6 8.972209 6 8.972209
6 7 13.865223 7 13.865223
7 8 12.396684 8 12.396684
8 15 32.461237 15 39.217787
9 16 39.993818 16 27.541544
"""
new_df, row_idx, col_idx = | |
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not
a good choice for files intended to archive data or transport data
between machines with different endianness. Some of these problems can
be overcome by outputting the data as text files, at the expense of
speed and file size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result,
tofile cannot be used with files objects supporting compression (e.g.,
GzipFile) or file-like objects that do not support ``fileno()`` (e.g.,
BytesIO).
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self.__array__().tofile(fid=fid, sep=sep, format=format)
def tobytes(self, order="C"):
"""a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object is produced in C-order by default.
This behavior is controlled by the ``order`` parameter.
Parameters
----------
order : ``{'C', 'F', 'A'}``, optional
Controls the memory layout of the bytes object. 'C' means C-order,
'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
Fortran contiguous, 'C' otherwise. Default is 'C'.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self.__array__().tobytes(order=order)
def tolist(self):
"""a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python
scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python
type, via the `~cunumeric.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it
will not be a list at all, but a simple Python scalar.
Parameters
----------
None
Returns
-------
y : Any
The possibly nested list of array elements. (object, or list of
object, or list of list of object, or ...)
Notes
-----
The array may be recreated via ``a = cunumeric.array(a.tolist())``,
although this may sometimes lose precision.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self.__array__().tolist()
def tostring(self, order="C"):
"""a.tostring(order='C')
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`.
Availability
--------
Multiple GPUs, Multiple CPUs
"""
return self.__array__().tostring(order=order)
def transpose(self, axes=None):
"""a.transpose(axes=None)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply
the same vector. To convert a 1-D array into a 2D column vector, an
additional dimension must be added. `np.atleast2d(a).T` achieves this,
as does `a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None or tuple[int]
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
transpose : Equivalent function
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its
data.
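Examples
--------
A minimal illustrative sketch (assumes the usual ``import cunumeric as np`` alias):

>>> a = np.arange(6).reshape((2, 3))
>>> a.shape
(2, 3)
>>> a.transpose().shape
(3, 2)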
Availability
--------
Multiple GPUs, Multiple CPUs
"""
if self.ndim == 1:
return self
if axes is None:
axes = tuple(range(self.ndim - 1, -1, -1))
elif len(axes) != self.ndim:
raise ValueError(
"axes must be the same size as ndim for transpose"
)
return ndarray(shape=None, thunk=self._thunk.transpose(axes))
def flip(self, axis=None):
"""
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
axis : None or int or tuple[int], optional
Axis or axes along which to flip over. The default, axis=None, will
flip over all of the axes of the input array. If axis is negative
it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the
axes specified in the tuple.
Returns
-------
out : array_like
A view of `a` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
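Examples
--------
A minimal illustrative sketch (assumes ``import cunumeric as np``):

>>> a = np.array([1, 2, 3])
>>> a.flip()
array([3, 2, 1])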
Availability
--------
Single GPU, Single CPU
"""
result = ndarray(
shape=self.shape,
dtype=self.dtype,
inputs=(self,),
)
result._thunk.flip(self._thunk, axis)
return result
def view(self, dtype=None, type=None):
if dtype is not None and dtype != self.dtype:
raise NotImplementedError(
"cuNumeric does not currently support type reinterpretation"
)
return ndarray(shape=self.shape, dtype=self.dtype, thunk=self._thunk)
def unique(self):
"""a.unique()
Find the unique elements of an array.
Refer to :func:`cunumeric.unique` for full documentation.
See Also
--------
cunumeric.unique : equivalent function
Availability
--------
Multiple GPUs, Multiple CPUs
"""
thunk = self._thunk.unique()
return ndarray(shape=thunk.shape, thunk=thunk)
@classmethod
def _get_where_thunk(cls, where, out_shape):
if where is True:
return True
if where is False:
raise RuntimeError("should have caught this earlier")
if not isinstance(where, ndarray) or where.dtype != np.bool_:
raise RuntimeError("should have converted this earlier")
if where.shape != out_shape:
raise ValueError("where parameter must have same shape as output")
return where._thunk
@staticmethod
def find_common_type(*args):
"""Determine common type following standard coercion rules.
Parameters
----------
\\*args :
A list of dtypes or dtype convertible objects representing arrays
or scalars.
Returns
-------
datatype : data-type
The common data type, which is the maximum of the array types,
ignoring any scalar types, unless the maximum scalar type is of a
different kind (`dtype.kind`). If the kind is not understood, then
None is returned.
"""
array_types = list()
scalar_types = list()
for array in args:
if array.size == 1:
scalar_types.append(array.dtype)
else:
array_types.append(array.dtype)
return np.find_common_type(array_types, scalar_types)
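# Rough illustration of the coercion rule described above (comments only;
# this helper delegates to numpy.find_common_type):
#
#   >>> np.find_common_type([np.dtype(np.float32)], [np.dtype(np.float64)])
#   dtype('float32')   # a same-kind scalar does not upcast the arrays
#   >>> np.find_common_type([np.dtype(np.int32)], [np.dtype(np.float64)])
#   dtype('float64')   # a different-kind scalar wins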
def _maybe_convert(self, dtype, hints):
if self.dtype == dtype:
return self
copy = ndarray(shape=self.shape, dtype=dtype, inputs=hints)
copy._thunk.convert(self._thunk)
return copy
# For performing normal/broadcast unary operations
@classmethod
def _perform_unary_op(
cls,
op,
src,
dst=None,
extra_args=None,
dtype=None,
where=True,
out_dtype=None,
):
if dst is not None:
# If the shapes don't match see if we can broadcast
# This will raise an exception if they can't be broadcast together
if isinstance(where, ndarray):
np.broadcast_shapes(src.shape, dst.shape, where.shape)
else:
np.broadcast_shapes(src.shape, dst.shape)
else:
# No output yet, so make one
if isinstance(where, ndarray):
out_shape = np.broadcast_shapes(src.shape, where.shape)
else:
out_shape = src.shape
if dtype is not None:
dst = ndarray(
shape=out_shape,
dtype=dtype,
inputs=(src, where),
)
elif out_dtype is not None:
dst = ndarray(
shape=out_shape,
dtype=out_dtype,
inputs=(src, where),
)
else:
dst = ndarray(
shape=out_shape,
dtype=src.dtype
if src.dtype.kind != "c"
else np.dtype(np.float32)
if src.dtype == np.dtype(np.complex64)
else np.dtype(np.float64),
inputs=(src, where),
)
# Quick exit
if where is False:
return dst
if out_dtype is None:
if dst.dtype != src.dtype and not (
op == UnaryOpCode.ABSOLUTE and src.dtype.kind == "c"
):
temp = ndarray(
dst.shape,
dtype=src.dtype,
inputs=(src, where),
)
temp._thunk.unary_op(
op,
src._thunk,
cls._get_where_thunk(where, dst.shape),
extra_args,
)
dst._thunk.convert(temp._thunk)
else:
dst._thunk.unary_op(
op,
src._thunk,
cls._get_where_thunk(where, dst.shape),
extra_args,
)
else:
if dst.dtype != out_dtype:
temp = ndarray(
dst.shape,
dtype=out_dtype,
inputs=(src, where),
)
temp._thunk.unary_op(
op,
src._thunk,
cls._get_where_thunk(where, dst.shape),
extra_args,
)
dst._thunk.convert(temp._thunk)
else:
dst._thunk.unary_op(
op,
src._thunk,
cls._get_where_thunk(where, dst.shape),
extra_args,
)
return dst
# For performing reduction unary operations
@classmethod
def _perform_unary_reduction(
cls,
op,
src,
axis=None,
dtype=None,
res_dtype=None,
out=None,
keepdims=False,
args=None,
initial=None,
where=True,
):
# When 'res_dtype' is not None, the input and output of the reduction
# have different types. Such reduction operators don't take a dtype of
# the accumulator
if res_dtype is not None:
assert dtype is None
dtype = src.dtype
else:
# If 'dtype' exists, that determines both the accumulation dtype
# and the output dtype
if dtype is not None:
res_dtype = dtype
| |
<reponame>Martje55555/carla-rl-agent
"""Proximal Policy Optimization Agent"""
import os
import gym
import time
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import random
from typing import Union
from rl import utils
from rl.agents.agents import Agent
from rl.parameters import DynamicParameter
from rl.networks.networks import PPONetwork
from tensorflow.keras import losses
from tensorflow.keras.optimizers.schedules import LearningRateSchedule
class PPOAgent(Agent):
# TODO: dynamic-parameters: gamma, lambda, opt_steps, update_freq?, polyak?, clip_norm
# TODO: debug each action separately
# TODO: RNN support
def __init__(self, *args, policy_lr: Union[float, LearningRateSchedule, DynamicParameter] = 1e-3, gamma=0.99,
lambda_=0.95, value_lr: Union[float, LearningRateSchedule, DynamicParameter] = 3e-4, load=False,
optimization_steps=(1, 1), name='ppo-agent', optimizer='adam', clip_norm=(1.0, 1.0),
clip_ratio: Union[float, LearningRateSchedule, DynamicParameter] = 0.2, seed_regularization=False,
entropy_regularization: Union[float, LearningRateSchedule, DynamicParameter] = 0.0,
network: Union[dict, PPONetwork] = None, update_frequency=1, polyak=1.0, repeat_action=1,
advantage_scale: Union[float, LearningRateSchedule, DynamicParameter] = 2.0, **kwargs):
assert 0.0 < polyak <= 1.0
assert repeat_action >= 1
super().__init__(*args, name=name, **kwargs)
self.memory: PPOMemory = None
self.gamma = gamma
self.lambda_ = lambda_
self.repeat_action = repeat_action
self.adv_scale = DynamicParameter.create(value=advantage_scale)
if seed_regularization:
def _seed_regularization():
seed = random.randint(a=0, b=2**32 - 1)
self.set_random_seed(seed)
self.seed_regularization = _seed_regularization
self.seed_regularization()
else:
self.seed_regularization = lambda: None
# Entropy regularization
self.entropy_strength = DynamicParameter.create(value=entropy_regularization)
# Ratio clipping
if isinstance(clip_ratio, float):
assert clip_ratio >= 0.0
self.clip_ratio = DynamicParameter.create(value=clip_ratio)
# Action space
self._init_action_space()
print('state_spec:', self.state_spec)
print('action_shape:', self.num_actions)
print('distribution:', self.distribution_type)
# Gradient clipping:
self._init_gradient_clipping(clip_norm)
# Networks & Loading
self.weights_path = dict(policy=os.path.join(self.base_path, 'policy_net'),
value=os.path.join(self.base_path, 'value_net'))
if isinstance(network, dict):
network_class = network.pop('network', PPONetwork)
if network_class is PPONetwork:
# policy/value-specific arguments
policy_args = network.pop('policy', {})
value_args = network.pop('value', policy_args)
# common arguments
for k, v in network.items():
if k not in policy_args:
policy_args[k] = v
if k not in value_args:
value_args[k] = v
self.network = network_class(agent=self, policy=policy_args, value=value_args, **network)
else:
self.network = network_class(agent=self, **network)
else:
self.network = PPONetwork(agent=self, policy={}, value={})
# Optimization
self.update_frequency = update_frequency
self.policy_lr = DynamicParameter.create(value=policy_lr)
self.value_lr = DynamicParameter.create(value=value_lr)
self.optimization_steps = dict(policy=optimization_steps[0], value=optimization_steps[1])
self.policy_optimizer = utils.get_optimizer_by_name(optimizer, learning_rate=self.policy_lr)
self.value_optimizer = utils.get_optimizer_by_name(optimizer, learning_rate=self.value_lr)
self.should_polyak_average = polyak < 1.0
self.polyak_coeff = polyak
if load:
self.load()
def _init_gradient_clipping(self, clip_norm: Union[tuple, float, None]):
if clip_norm is None:
self.should_clip_policy_grads = False
self.should_clip_value_grads = False
elif isinstance(clip_norm, float):
assert clip_norm > 0.0
self.should_clip_policy_grads = True
self.should_clip_value_grads = True
self.grad_norm_policy = clip_norm
self.grad_norm_value = clip_norm
else:
assert isinstance(clip_norm, tuple)
if clip_norm[0] is None:
self.should_clip_policy_grads = False
else:
assert isinstance(clip_norm[0], float)
assert clip_norm[0] > 0.0
self.should_clip_policy_grads = True
self.grad_norm_policy = tf.constant(clip_norm[0], dtype=tf.float32)
if clip_norm[1] is None:
self.should_clip_value_grads = False
else:
assert isinstance(clip_norm[1], float)
assert clip_norm[1] > 0.0
self.should_clip_value_grads = True
self.grad_norm_value = tf.constant(clip_norm[1], dtype=tf.float32)
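# Quick reference for the accepted `clip_norm` forms handled above (added
# comment, inferred directly from the branches):
#   clip_norm=None         -> no gradient clipping for either network
#   clip_norm=0.5          -> clip both policy and value gradients to norm 0.5
#   clip_norm=(1.0, None)  -> clip policy gradients only, leave value gradients alone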
# TODO: handle complex action spaces (make use of Agent.action_spec)
def _init_action_space(self):
action_space = self.env.action_space
if isinstance(action_space, gym.spaces.Box):
self.num_actions = action_space.shape[0]
# continuous:
if action_space.is_bounded():
self.distribution_type = 'beta'
self.action_low = tf.constant(action_space.low, dtype=tf.float32)
self.action_high = tf.constant(action_space.high, dtype=tf.float32)
self.action_range = tf.constant(action_space.high - action_space.low,
dtype=tf.float32)
self.convert_action = lambda a: (a * self.action_range + self.action_low)[0].numpy()
else:
self.distribution_type = 'gaussian'
self.convert_action = lambda a: a[0].numpy()
else:
# discrete:
self.distribution_type = 'categorical'
if isinstance(action_space, gym.spaces.MultiDiscrete):
# make sure all discrete components of the space have the same number of classes
assert np.all(action_space.nvec == action_space.nvec[0])
self.num_actions = action_space.nvec.shape[0]
self.num_classes = action_space.nvec[0] + 1  # to include the last class, i.e. 0 to K (not 0 to K-1)
self.convert_action = lambda a: tf.cast(a[0], dtype=tf.int32).numpy()
else:
self.num_actions = 1
self.num_classes = action_space.n
self.convert_action = lambda a: tf.cast(tf.squeeze(a), dtype=tf.int32).numpy()
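# Sketch of the bounded-Box rescaling above (comments only): the beta policy
# samples actions in [0, 1], and `convert_action` maps them back to the
# environment's bounds. E.g. for a hypothetical Box(low=-2.0, high=2.0):
#   action_range = 4.0, action_low = -2.0
#   a = 0.75  ->  0.75 * 4.0 + (-2.0) = 1.0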
def act(self, state, *args, **kwargs):
action = self.network.act(inputs=state)
return self.convert_action(action)
def predict(self, state, *args, **kwargs):
return self.network.predict(inputs=state)
def update(self):
t0 = time.time()
self.seed_regularization()
# Prepare data:
value_batches = self.get_value_batches()
policy_batches = self.get_policy_batches()
# Policy network optimization:
for opt_step in range(self.optimization_steps['policy']):
for data_batch in policy_batches:
self.seed_regularization()
total_loss, policy_grads = self.get_policy_gradients(data_batch)
self.update_policy(policy_grads)
if isinstance(policy_grads, dict):
policy_grads = policy_grads['policy']
self.log(loss_total=total_loss, lr_policy=self.policy_lr.value,
gradients_norm_policy=[tf.norm(gradient) for gradient in policy_grads])
# Value network optimization:
for _ in range(self.optimization_steps['value']):
for data_batch in value_batches:
self.seed_regularization()
value_loss, value_grads = self.get_value_gradients(data_batch)
self.update_value(value_grads)
if isinstance(value_grads, dict):
value_grads = value_grads['value']
self.log(loss_value=value_loss, lr_value=self.value_lr.value,
gradients_norm_value=[tf.norm(gradient) for gradient in value_grads])
print(f'Update took {round(time.time() - t0, 3)}s')
def get_policy_gradients(self, batch):
with tf.GradientTape() as tape:
loss = self.policy_objective(batch)
gradients = tape.gradient(loss, self.network.policy.trainable_variables)
return loss, gradients
def update_policy(self, gradients) -> Union[list, bool]:
return self.apply_policy_gradients(gradients), True
def apply_policy_gradients(self, gradients):
if self.should_clip_policy_grads:
gradients = utils.clip_gradients(gradients, norm=self.grad_norm_policy)
if self.should_polyak_average:
old_weights = self.network.policy.get_weights()
self.network.update_old_policy(old_weights)
self.policy_optimizer.apply_gradients(zip(gradients, self.network.policy.trainable_variables))
utils.polyak_averaging(self.network.policy, old_weights, alpha=self.polyak_coeff)
else:
self.network.update_old_policy()
self.policy_optimizer.apply_gradients(zip(gradients, self.network.policy.trainable_variables))
return gradients
def get_value_gradients(self, batch):
with tf.GradientTape() as tape:
loss = self.value_objective(batch)
gradients = tape.gradient(loss, self.network.value.trainable_variables)
return loss, gradients
def update_value(self, gradients) -> Union[list, bool]:
return self.apply_value_gradients(gradients), True
def apply_value_gradients(self, gradients):
if self.should_clip_value_grads:
gradients = utils.clip_gradients(gradients, norm=self.grad_norm_value)
if self.should_polyak_average:
old_weights = self.network.value.get_weights()
self.value_optimizer.apply_gradients(zip(gradients, self.network.value.trainable_variables))
utils.polyak_averaging(self.network.value, old_weights, alpha=self.polyak_coeff)
else:
self.value_optimizer.apply_gradients(zip(gradients, self.network.value.trainable_variables))
return gradients
def value_batch_tensors(self) -> Union[tuple, dict]:
"""Defines which data to use in `get_value_batches()`"""
return self.memory.states, self.memory.returns
def policy_batch_tensors(self) -> Union[tuple, dict]:
"""Defines which data to use in `get_policy_batches()`"""
return self.memory.states, self.memory.advantages, self.memory.actions, self.memory.log_probabilities
def get_value_batches(self):
"""Computes batches of data for updating the value network"""
return utils.data_to_batches(tensors=self.value_batch_tensors(), batch_size=self.batch_size,
drop_remainder=self.drop_batch_remainder, skip=self.skip_count,
shuffle=True, shuffle_batches=False, num_shards=self.obs_skipping)
def get_policy_batches(self):
"""Computes batches of data for updating the policy network"""
return utils.data_to_batches(tensors=self.policy_batch_tensors(), batch_size=self.batch_size,
drop_remainder=self.drop_batch_remainder, skip=self.skip_count,
num_shards=self.obs_skipping, shuffle=self.shuffle,
shuffle_batches=self.shuffle_batches)
@tf.function
def value_objective(self, batch):
states, returns = batch[:2]
values = self.network.value(states, training=True)
base_loss = tf.reduce_mean(losses.MSE(y_true=returns[:, 0], y_pred=values[:, 0]))
exp_loss = tf.reduce_mean(losses.MSE(y_true=returns[:, 1], y_pred=values[:, 1]))
# normalize each head's loss: the base value by 0.25 (= 1/2^2), the exp head by 1/exp_scale^2
return 0.5 * (0.25 * base_loss + exp_loss / (self.network.exp_scale ** 2))
def policy_objective(self, batch):
"""PPO-Clip Objective"""
states, advantages, actions, old_log_probabilities = batch[:4]
new_policy: tfp.distributions.Distribution = self.network.policy(states, training=True)
# TODO: probable bug -> "self.num_actions == 1"??
if self.distribution_type == 'categorical' and self.num_actions == 1:
batch_size = tf.shape(actions)[0]
actions = tf.reshape(actions, shape=batch_size)
new_log_prob = new_policy.log_prob(actions)
new_log_prob = tf.reshape(new_log_prob, shape=(batch_size, self.num_actions))
else:
# clip samples (actions) away from 0 and 1 before computing the density:
# motivation: https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/Beta
actions = tf.clip_by_value(actions, utils.EPSILON, 1.0 - utils.EPSILON)
new_log_prob = new_policy.log_prob(actions)
kl_divergence = utils.kl_divergence(old_log_probabilities, new_log_prob)
kl_divergence = tf.reduce_mean(kl_divergence)
# Entropy
entropy = tf.reduce_mean(new_policy.entropy())
entropy_penalty = self.entropy_strength() * entropy
# Compute the probability ratio between the current and old policy
ratio = tf.math.exp(new_log_prob - old_log_probabilities)
ratio = tf.reduce_mean(ratio, axis=1) # mean over per-action ratio
# Compute the clipped ratio times advantage
clip_value = self.clip_ratio()
clipped_ratio = tf.clip_by_value(ratio, clip_value_min=1.0 - clip_value, clip_value_max=1.0 + clip_value)
# Source: https://github.com/openai/spinningup/blob/master/spinup/algos/tf1/ppo/ppo.py#L201
min_adv = tf.where(advantages > 0.0, x=(1.0 + clip_value) * advantages, y=(1.0 - clip_value) * advantages)
# Loss = min { ratio * A, clipped_ratio * A } + entropy_term
policy_loss = -tf.reduce_mean(tf.minimum(ratio * advantages, min_adv))
# policy_loss = -tf.reduce_mean(tf.minimum(ratio * advantages, clipped_ratio * advantages))
total_loss = policy_loss - entropy_penalty
# Log stuff
self.log(ratio=tf.reduce_mean(ratio), log_prob=tf.reduce_mean(new_log_prob), entropy=entropy,
entropy_coeff=self.entropy_strength.value, ratio_clip=clip_value, kl_divergence=kl_divergence,
loss_policy=policy_loss.numpy(), loss_entropy=entropy_penalty.numpy(),
# adv_diff=clipped_ratio - min_adv, adv_ratio=ratio * advantages, adv_min=min_adv,
# adv_minimum=tf.minimum(ratio * advantages, min_adv), adv_min_diff=ratio * advantages - min_adv
)
return total_loss, kl_divergence
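# Reference note (added, not original): the surrogate implemented above via
# `min_adv` is the PPO-Clip objective
#   L(theta) = E[ min( r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t ) ]
# with r_t = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t) and eps = clip_ratio;
# the code minimizes -L(theta) minus an entropy bonus scaled by
# `entropy_strength`.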
def collect(self, episodes: int, timesteps: int, render=True, record_threshold=0.0, seeds=None, close=True):
import random
sample_seed = False
if isinstance(seeds, int):
self.set_random_seed(seed=seeds)
elif isinstance(seeds, list):
sample_seed = True
for episode in range(1, episodes + 1):
if sample_seed:
self.set_random_seed(seed=random.choice(seeds))
self.reset()
episode_reward = 0.0
memory = PPOMemory(state_spec=self.state_spec, num_actions=self.num_actions)
state = self.env.reset()
state = utils.to_tensor(state)
if isinstance(state, dict):
state = {f'state_{k}': v for k, v in state.items()}
for t in range(1, timesteps + 1):
if render:
self.env.render()
action, log_prob, value = self.network.act2(state)
next_state, reward, done, _ = self.env.step(self.convert_action(action))
episode_reward += reward
self.log(actions=action, rewards=reward, values=value, log_probs=log_prob)
memory.append(state, action, reward, value, log_prob)
state = utils.to_tensor(next_state)
if isinstance(state, dict):
state = {f'state_{k}': v for k, v in state.items()}
if done or (t == timesteps):
print(f'Episode {episode} terminated after {t} timesteps with reward {episode_reward}.')
last_value = self.network.predict_last_value(state, is_terminal=done)
memory.end_trajectory(last_value)
break
self.log(evaluation_reward=episode_reward)
self.write_summaries()
if episode_reward >= record_threshold:
memory.serialize(episode, save_path=self.traces_dir)
if close:
self.env.close()
def imitate(self, epochs=1, batch_size: Union[None, int] = None, shuffle_batches=False, shuffle_data=False,
close=True, seed=None):
"""Learn from experience traces collected by 'collect'"""
batch_size = self.batch_size if batch_size is None else batch_size
self.set_random_seed(seed)
for epoch in range(1, epochs + 1):
for i, trace in enumerate(utils.load_traces(self.traces_dir, shuffle=True)):
t0 = time.time()
self.reset()
trace = utils.unpack_trace(trace, unpack=False)
states = utils.to_tensor(trace['state'])[0]
actions = utils.to_float(trace['action'])
log_probs = utils.to_float(trace['log_prob'])
rewards = utils.to_float(trace['reward'])
values = utils.to_float(trace['value'])
data = utils.data_to_batches((states, actions, log_probs, rewards, values), batch_size=batch_size,
shuffle_batches=shuffle_batches, shuffle=shuffle_data, seed=seed,
skip=self.skip_count, num_shards=self.obs_skipping,
drop_remainder=self.drop_batch_remainder)
for batch in data:
states, actions, log_probs, rewards, values = batch
# TODO: refactor
memory = PPOMemory(state_spec=self.state_spec, num_actions=self.num_actions)
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others.
# SPDX-License-Identifier: Apache-2.0
#
# Visit https://aboutcode.org and https://github.com/nexB/ for support and download.
# ScanCode is a trademark of nexB Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import io
import os
from types import GeneratorType
import pytest
from commoncode import fileutils
from commoncode.fileutils import as_posixpath
from commoncode.system import on_linux
from commoncode.system import on_windows
from commoncode.testcase import FileBasedTesting
import extractcode
from extractcode import extract
from extractcode_assert_utils import check_files
from extractcode_assert_utils import check_no_error
from extractcode_assert_utils import BaseArchiveTestCase
project_root = os.path.dirname(os.path.dirname(__file__))
class TestExtract(BaseArchiveTestCase):
test_data_dir = os.path.join(os.path.dirname(__file__), 'data')
def test_extract_file_function(self):
test_file = self.get_test_loc('extract/basic_non_nested.tar.gz', copy=True)
base = fileutils.parent_directory(test_file)
expected = ['a/b/a.txt', 'a/b/b.txt', 'a/c/c.txt']
cleaned_test_file = test_file.replace(base, '')
expected_event = [
extract.ExtractEvent(
source=cleaned_test_file,
target=extractcode.get_extraction_path(cleaned_test_file),
done=False, warnings=[], errors=[]
),
extract.ExtractEvent(
source=cleaned_test_file,
target=extractcode.get_extraction_path(cleaned_test_file),
done=True, warnings=[], errors=[]
)
]
target = extractcode.get_extraction_path(test_file)
result = list(extract.extract_file(test_file, target))
result = [r._replace(
source=cleaned_test_file,
target=extractcode.get_extraction_path(cleaned_test_file))
for r in result]
assert expected_event == result
check_files(target, expected)
def test_extract_archive_non_nested(self):
test_dir = self.get_test_loc('extract/basic_non_nested.tar.gz', copy=True)
expected = (
'a/b/a.txt',
'a/b/b.txt',
'a/c/c.txt',
)
result = extract.extract(test_dir, recurse=False)
check_no_error(result)
check_files(extractcode.get_extraction_path(test_dir), expected)
result = extract.extract(test_dir, recurse=True)
check_no_error(result)
check_files(extractcode.get_extraction_path(test_dir), expected)
def test_extract_archive_shallow_with_readonly_inside(self):
test_file = self.get_test_loc('extract/readonly/read_only.tar.gz', copy=True)
"""
This test file was created with:
import tarfile, time, datetime, io, os
TEXT = 'something\n'
tar = tarfile.open('read_only.tar.gz', 'w:gz')
for i in range(0, 2):
tarinfo = tarfile.TarInfo()
tarinfo.name = 'somefilename-%i.txt' % i
tarinfo.uid = 123
tarinfo.gid = 456
tarinfo.uname = 'johndoe'
tarinfo.gname = 'fake'
tarinfo.type = tarfile.REGTYPE
tarinfo.mode = 0 # this is the readonly part
tarinfo.mtime = time.mktime(datetime.datetime.now().timetuple())
file = io.StringIO()
file.write(TEXT)
file.seek(0)
tarinfo.size = len(TEXT)
tar.addfile(tarinfo, file)
tar.close()
"""
result = list(extract.extract(test_file, recurse=False))
check_no_error(result)
expected = (
'somefilename-0.txt',
'somefilename-1.txt',
)
test_dir = extractcode.get_extraction_path(test_file)
check_files(test_dir, expected)
def test_extract_dir_shallow_with_readonly_inside(self):
test_dir = self.get_test_loc('extract/readonly', copy=True)
result = list(extract.extract(test_dir, recurse=False))
check_no_error(result)
expected = [
'read_only.tar.gz',
'read_only.tar.gz-extract/somefilename-0.txt',
'read_only.tar.gz-extract/somefilename-1.txt',
]
check_files(test_dir, expected)
def test_extract_tree_shallow_only(self):
expected = (
'a/a.tar.gz',
'a/a.txt',
'a/a.tar.gz-extract/a/b/a.txt',
'a/a.tar.gz-extract/a/b/b.txt',
'a/a.tar.gz-extract/a/c/c.txt',
'b/a.txt',
'b/b.tar.gz',
'b/b.tar.gz-extract/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/.svn/entries',
'b/b.tar.gz-extract/b/.svn/format',
'b/b.tar.gz-extract/b/a/a.tar.gz',
'b/b.tar.gz-extract/b/a/a.txt',
'b/b.tar.gz-extract/b/a/.svn/all-wcprops',
'b/b.tar.gz-extract/b/a/.svn/entries',
'b/b.tar.gz-extract/b/a/.svn/format',
'b/b.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/b/a.txt',
'b/b.tar.gz-extract/b/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/b/.svn/entries',
'b/b.tar.gz-extract/b/b/.svn/format',
'b/b.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz',
'b/b.tar.gz-extract/b/c/a.txt',
'b/b.tar.gz-extract/b/c/.svn/all-wcprops',
'b/b.tar.gz-extract/b/c/.svn/entries',
'b/b.tar.gz-extract/b/c/.svn/format',
'b/b.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'c/a.tar.gz',
'c/a.txt',
'c/a.tar.gz-extract/a/b/a.txt',
'c/a.tar.gz-extract/a/b/b.txt',
'c/a.tar.gz-extract/a/c/c.txt',
)
test_dir = self.get_test_loc('extract/tree', copy=True)
result = list(extract.extract(test_dir, recurse=False))
check_no_error(result)
check_files(test_dir, expected)
# extract again
result = list(extract.extract(test_dir, recurse=False))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tree_recursive(self):
expected = (
'a/a.tar.gz',
'a/a.txt',
'a/a.tar.gz-extract/a/b/a.txt',
'a/a.tar.gz-extract/a/b/b.txt',
'a/a.tar.gz-extract/a/c/c.txt',
'b/a.txt',
'b/b.tar.gz',
'b/b.tar.gz-extract/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/.svn/entries',
'b/b.tar.gz-extract/b/.svn/format',
'b/b.tar.gz-extract/b/a/a.tar.gz',
'b/b.tar.gz-extract/b/a/a.txt',
'b/b.tar.gz-extract/b/a/.svn/all-wcprops',
'b/b.tar.gz-extract/b/a/.svn/entries',
'b/b.tar.gz-extract/b/a/.svn/format',
'b/b.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/b/a.txt',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/b/b.txt',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/c/c.txt',
'b/b.tar.gz-extract/b/b/a.txt',
'b/b.tar.gz-extract/b/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/b/.svn/entries',
'b/b.tar.gz-extract/b/b/.svn/format',
'b/b.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz',
'b/b.tar.gz-extract/b/c/a.txt',
'b/b.tar.gz-extract/b/c/.svn/all-wcprops',
'b/b.tar.gz-extract/b/c/.svn/entries',
'b/b.tar.gz-extract/b/c/.svn/format',
'b/b.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/b/a.txt',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/b/b.txt',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/c/c.txt',
'c/a.tar.gz',
'c/a.txt',
'c/a.tar.gz-extract/a/b/a.txt',
'c/a.tar.gz-extract/a/b/b.txt',
'c/a.tar.gz-extract/a/c/c.txt',
)
test_dir = self.get_test_loc('extract/tree', copy=True)
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
# again
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tree_recursive_replace_originals(self):
expected = (
'a/a.txt',
'a/a.tar.gz/a/b/a.txt',
'a/a.tar.gz/a/b/b.txt',
'a/a.tar.gz/a/c/c.txt',
'b/a.txt',
'b/b.tar.gz/b/.svn/all-wcprops',
'b/b.tar.gz/b/.svn/entries',
'b/b.tar.gz/b/.svn/format',
'b/b.tar.gz/b/a/a.txt',
'b/b.tar.gz/b/a/.svn/all-wcprops',
'b/b.tar.gz/b/a/.svn/entries',
'b/b.tar.gz/b/a/.svn/format',
'b/b.tar.gz/b/a/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz/b/a/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz/b/a/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz/b/a/a.tar.gz/a/b/a.txt',
'b/b.tar.gz/b/a/a.tar.gz/a/b/b.txt',
'b/b.tar.gz/b/a/a.tar.gz/a/c/c.txt',
'b/b.tar.gz/b/b/a.txt',
'b/b.tar.gz/b/b/.svn/all-wcprops',
'b/b.tar.gz/b/b/.svn/entries',
'b/b.tar.gz/b/b/.svn/format',
'b/b.tar.gz/b/b/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz/b/c/a.txt',
'b/b.tar.gz/b/c/.svn/all-wcprops',
'b/b.tar.gz/b/c/.svn/entries',
'b/b.tar.gz/b/c/.svn/format',
'b/b.tar.gz/b/c/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz/b/c/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz/b/c/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz/b/c/a.tar.gz/a/b/a.txt',
'b/b.tar.gz/b/c/a.tar.gz/a/b/b.txt',
'b/b.tar.gz/b/c/a.tar.gz/a/c/c.txt',
'c/a.txt',
'c/a.tar.gz/a/b/a.txt',
'c/a.tar.gz/a/b/b.txt',
'c/a.tar.gz/a/c/c.txt',
)
test_dir = self.get_test_loc('extract/tree', copy=True)
result = list(extract.extract(test_dir, recurse=True, replace_originals=True))
check_no_error(result)
check_files(test_dir, expected)
# again
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tree_shallow_then_recursive(self):
shallow = (
'a/a.tar.gz',
'a/a.txt',
'a/a.tar.gz-extract/a/b/a.txt',
'a/a.tar.gz-extract/a/b/b.txt',
'a/a.tar.gz-extract/a/c/c.txt',
'b/a.txt',
'b/b.tar.gz',
'b/b.tar.gz-extract/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/.svn/entries',
'b/b.tar.gz-extract/b/.svn/format',
'b/b.tar.gz-extract/b/a/a.tar.gz',
'b/b.tar.gz-extract/b/a/a.txt',
'b/b.tar.gz-extract/b/a/.svn/all-wcprops',
'b/b.tar.gz-extract/b/a/.svn/entries',
'b/b.tar.gz-extract/b/a/.svn/format',
'b/b.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/b/a.txt',
'b/b.tar.gz-extract/b/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/b/.svn/entries',
'b/b.tar.gz-extract/b/b/.svn/format',
'b/b.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz',
'b/b.tar.gz-extract/b/c/a.txt',
'b/b.tar.gz-extract/b/c/.svn/all-wcprops',
'b/b.tar.gz-extract/b/c/.svn/entries',
'b/b.tar.gz-extract/b/c/.svn/format',
'b/b.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'c/a.tar.gz',
'c/a.txt',
'c/a.tar.gz-extract/a/b/a.txt',
'c/a.tar.gz-extract/a/b/b.txt',
'c/a.tar.gz-extract/a/c/c.txt',
)
recursed = (
'a/a.tar.gz',
'a/a.txt',
'a/a.tar.gz-extract/a/b/a.txt',
'a/a.tar.gz-extract/a/b/b.txt',
'a/a.tar.gz-extract/a/c/c.txt',
'b/a.txt',
'b/b.tar.gz',
'b/b.tar.gz-extract/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/.svn/entries',
'b/b.tar.gz-extract/b/.svn/format',
'b/b.tar.gz-extract/b/a/a.tar.gz',
'b/b.tar.gz-extract/b/a/a.txt',
'b/b.tar.gz-extract/b/a/.svn/all-wcprops',
'b/b.tar.gz-extract/b/a/.svn/entries',
'b/b.tar.gz-extract/b/a/.svn/format',
'b/b.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/b/a.txt',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/b/b.txt',
'b/b.tar.gz-extract/b/a/a.tar.gz-extract/a/c/c.txt',
'b/b.tar.gz-extract/b/b/a.txt',
'b/b.tar.gz-extract/b/b/.svn/all-wcprops',
'b/b.tar.gz-extract/b/b/.svn/entries',
'b/b.tar.gz-extract/b/b/.svn/format',
'b/b.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz',
'b/b.tar.gz-extract/b/c/a.txt',
'b/b.tar.gz-extract/b/c/.svn/all-wcprops',
'b/b.tar.gz-extract/b/c/.svn/entries',
'b/b.tar.gz-extract/b/c/.svn/format',
'b/b.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'b/b.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/b/a.txt',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/b/b.txt',
'b/b.tar.gz-extract/b/c/a.tar.gz-extract/a/c/c.txt',
'c/a.tar.gz',
'c/a.txt',
'c/a.tar.gz-extract/a/b/a.txt',
'c/a.tar.gz-extract/a/b/b.txt',
'c/a.tar.gz-extract/a/c/c.txt',
)
test_dir = self.get_test_loc('extract/tree', copy=True)
result = list(extract.extract(test_dir, recurse=False))
check_no_error(result)
check_files(test_dir, shallow)
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, recursed)
def test_uncompress_corrupted_archive_with_zlib(self):
from extractcode import archive
test_file = self.get_test_loc('extract/corrupted/a.tar.gz', copy=True)
test_dir = self.get_temp_dir()
expected = Exception('Error -3 while decompressing')
self.assertRaisesInstance(expected, archive.uncompress_gzip, test_file, test_dir)
def test_uncompress_corrupted_archive_with_libarchive(self):
from extractcode import libarchive2
test_file = self.get_test_loc('extract/corrupted/a.tar.gz', copy=True)
test_dir = self.get_temp_dir()
expected = Exception('gzip decompression failed')
self.assertRaisesInstance(expected, libarchive2.extract, test_file, test_dir)
@pytest.mark.skipif(not on_linux, reason='Expectations are different on Windows and macOS')
def test_extract_tree_with_corrupted_archives_linux(self):
expected = (
'a.tar.gz',
)
test_dir = self.get_test_loc('extract/corrupted', copy=True)
result = list(extract.extract(test_dir, recurse=False))
check_files(test_dir, expected)
assert len(result) == 2
result = result[1]
assert len(result.errors) == 1
assert result.errors[0].startswith('gzip decompression failed')
assert not result.warnings
@pytest.mark.skipif(on_linux, reason='Expectations are different on Windows and macOS')
def test_extract_tree_with_corrupted_archives_mac_win(self):
expected = (
'a.tar.gz',
)
test_dir = self.get_test_loc('extract/corrupted', copy=True)
result = list(extract.extract(test_dir, recurse=False))
check_files(test_dir, expected)
assert len(result) == 2
result = result[1]
errs = ['gzip decompression failed']
assert errs == result.errors
assert not result.warnings
def test_extract_with_empty_dir_and_small_files_ignores_empty_dirs(self):
expected = (
'empty_small.zip',
'empty_small.zip-extract/empty_dirs_and_small_files/small_files/small_file.txt',)
test_dir = self.get_test_loc('extract/small', copy=True)
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tar_with_broken_links(self):
test_dir = self.get_test_loc('extract/broken_link', copy=True)
result = list(extract.extract(test_dir, recurse=True))
expected = (
'broken-link.tar.bz2',
'broken-link.tar.bz2-extract/openssl/test/Makefile',
)
check_files(test_dir, expected)
expected_warning = [[], []]
warns = [r.warnings for r in result]
assert expected_warning == warns
def test_extract_nested_tar_file_recurse_only(self):
test_file = self.get_test_loc('extract/nested/nested_tars.tar.gz', copy=True)
expected = [
'nested_tars.tar.gz',
'nested_tars.tar.gz-extract/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/.svn/entries',
'nested_tars.tar.gz-extract/b/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/a/.svn/entries',
'nested_tars.tar.gz-extract/b/a/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/a/a.tar.gz',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/a/a.txt',
'nested_tars.tar.gz-extract/b/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/b/.svn/entries',
'nested_tars.tar.gz-extract/b/b/.svn/format',
'nested_tars.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/b/a.txt',
'nested_tars.tar.gz-extract/b/c/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/c/.svn/entries',
'nested_tars.tar.gz-extract/b/c/.svn/format',
'nested_tars.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/c/a.tar.gz',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/c/a.txt'
]
result = list(extract.extract(test_file, recurse=True))
check_no_error(result)
check_files(test_file, expected)
def test_extract_nested_tar_file_shallow_only(self):
test_dir = self.get_test_loc('extract/nested/nested_tars.tar.gz', copy=True)
expected = [
'nested_tars.tar.gz',
'nested_tars.tar.gz-extract/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/.svn/entries',
'nested_tars.tar.gz-extract/b/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/a/.svn/entries',
'nested_tars.tar.gz-extract/b/a/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/a/a.tar.gz',
'nested_tars.tar.gz-extract/b/a/a.txt',
'nested_tars.tar.gz-extract/b/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/b/.svn/entries',
'nested_tars.tar.gz-extract/b/b/.svn/format',
'nested_tars.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/b/a.txt',
'nested_tars.tar.gz-extract/b/c/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/c/.svn/entries',
'nested_tars.tar.gz-extract/b/c/.svn/format',
'nested_tars.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/c/a.tar.gz',
'nested_tars.tar.gz-extract/b/c/a.txt'
]
result1 = list(extract.extract(test_dir, recurse=False))
check_no_error(result1)
check_files(test_dir, expected)
def test_extract_nested_tar_file_shallow_then_recurse(self):
test_file = self.get_test_loc('extract/nested/nested_tars.tar.gz', copy=True)
expected = [
'nested_tars.tar.gz',
'nested_tars.tar.gz-extract/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/.svn/entries',
'nested_tars.tar.gz-extract/b/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/a/.svn/entries',
'nested_tars.tar.gz-extract/b/a/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/a/a.tar.gz',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/a/a.txt',
'nested_tars.tar.gz-extract/b/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/b/.svn/entries',
'nested_tars.tar.gz-extract/b/b/.svn/format',
'nested_tars.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/b/a.txt',
'nested_tars.tar.gz-extract/b/c/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/c/.svn/entries',
'nested_tars.tar.gz-extract/b/c/.svn/format',
'nested_tars.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/c/a.tar.gz',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/c/a.txt'
]
result1 = list(extract.extract(test_file, recurse=False))
result2 = list(extract.extract(test_file, recurse=True))
check_no_error(result1)
check_no_error(result2)
check_files(test_file, expected)
def test_extract_dir_with_nested_tar_file_shallow_then_recurse(self):
test_dir = self.get_test_loc('extract/nested', copy=True)
expected = [
'nested_tars.tar.gz',
'nested_tars.tar.gz-extract/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/.svn/entries',
'nested_tars.tar.gz-extract/b/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/a/.svn/entries',
'nested_tars.tar.gz-extract/b/a/.svn/format',
'nested_tars.tar.gz-extract/b/a/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/a/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/a/a.tar.gz',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/a/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/a/a.txt',
'nested_tars.tar.gz-extract/b/b/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/b/.svn/entries',
'nested_tars.tar.gz-extract/b/b/.svn/format',
'nested_tars.tar.gz-extract/b/b/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/b/a.txt',
'nested_tars.tar.gz-extract/b/c/.svn/all-wcprops',
'nested_tars.tar.gz-extract/b/c/.svn/entries',
'nested_tars.tar.gz-extract/b/c/.svn/format',
'nested_tars.tar.gz-extract/b/c/.svn/prop-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.tar.gz.svn-base',
'nested_tars.tar.gz-extract/b/c/.svn/text-base/a.txt.svn-base',
'nested_tars.tar.gz-extract/b/c/a.tar.gz',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/a.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/b/b.txt',
'nested_tars.tar.gz-extract/b/c/a.tar.gz-extract/a/c/c.txt',
'nested_tars.tar.gz-extract/b/c/a.txt'
]
result1 = list(extract.extract(test_dir, recurse=False))
result2 = list(extract.extract(test_dir, recurse=True))
check_no_error(result1)
check_no_error(result2)
check_files(test_dir, expected)
def test_extract_zip_with_spaces_in_name(self):
test_dir = self.get_test_loc('extract/space-zip', copy=True)
expected = (
'with spaces in name.zip',
'with spaces in name.zip-extract/empty_dirs_and_small_files/small_files/small_file.txt'
)
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tar_gz_with_spaces_in_name(self):
test_dir = self.get_test_loc('extract/space-tgz', copy=True)
expected = (
'with spaces in name.tar.gz',
'with spaces in name.tar.gz-extract/a/b/a.txt',
'with spaces in name.tar.gz-extract/a/b/b.txt',
'with spaces in name.tar.gz-extract/a/c/c.txt',
)
result = list(extract.extract(test_dir, recurse=True))
check_no_error(result)
check_files(test_dir, expected)
def test_extract_tar_with_special_files(self):
test_dir = self.get_test_loc('extract/special', copy=True)
expected = [
't.tgz',
't.tgz-extract/0-REGTYPE',
't.tgz-extract/0-REGTYPE-TEXT',
't.tgz-extract/0-REGTYPE-VEEEERY_LONG_NAME_____________________________________________________________________________________________________________________155',
# we skip links but not hardlinks
't.tgz-extract/1-LNKTYPE',
't.tgz-extract/S-SPARSE',
't.tgz-extract/S-SPARSE-WITH-NULLS',
]
result = list(extract.extract(test_dir, recurse=True))
check_files(test_dir, expected)
errs = [r.errors for r in result if r.errors]
assert [] == errs
warns = [r.warnings for r in result if r.warnings]
assert [] == warns
def test_extract_directory_of_windows_ar_archives(self):
test_dir = self.get_test_loc('extract/ar_tree/winlib', copy=True)
result = list(extract.extract(test_dir, recurse=True))
expected = self.get_test_loc('extract/ar_tree/winlib-expected.json')
check_files(test_dir, expected, regen=False)
check_no_error(result)
def test_extract_nested_arch_with_corruption_should_extract_inner_archives_only_once(self):
test_file = self.get_test_loc(
'extract/nested_not_compressed/nested_with_not_compressed_gz_file.tgz', copy=True)
expected = [
'nested_with_not_compressed_gz_file.tgz',
'nested_with_not_compressed_gz_file.tgz-extract/top/file',
'nested_with_not_compressed_gz_file.tgz-extract/top/notcompressed.gz'
]
result = list(extract.extract(test_file, recurse=True))
check_no_error(result)
check_files(test_file, expected)
def test_extract_directory_with_office_docs(self):
test_dir = self.get_test_loc('extract/office_docs', copy=True)
result = list(extract.extract(test_dir, kinds=(extractcode.docs,), recurse=True))
expected = [
'abc.docx',
'abc.docx-extract/[Content_Types].xml',
'abc.docx-extract/docProps/app.xml',
'abc.docx-extract/docProps/core.xml',
'abc.docx-extract/_rels/.rels',
'abc.docx-extract/word/fontTable.xml',
'abc.docx-extract/word/document.xml',
'abc.docx-extract/word/settings.xml',
'abc.docx-extract/word/numbering.xml',
'abc.docx-extract/word/activeX/activeX1.xml',
'abc.docx-extract/word/activeX/activeX2.xml',
'abc.docx-extract/word/activeX/activeX3.xml',
'abc.docx-extract/word/activeX/_rels/activeX1.xml.rels',
'abc.docx-extract/word/activeX/_rels/activeX2.xml.rels',
'abc.docx-extract/word/activeX/_rels/activeX3.xml.rels',
'abc.docx-extract/word/activeX/activeX1.bin',
'abc.docx-extract/word/activeX/activeX3.bin',
'abc.docx-extract/word/activeX/activeX2.bin',
'abc.docx-extract/word/webSettings.xml',
'abc.docx-extract/word/styles.xml',
'abc.docx-extract/word/theme/theme1.xml',
'abc.docx-extract/word/_rels/document.xml.rels',
'abc.docx-extract/word/stylesWithEffects.xml',
'abc.docx-extract/word/media/image1.gif',
'abc.docx-extract/word/media/image4.wmf',
'abc.docx-extract/word/media/image2.wmf',
'abc.docx-extract/word/media/image3.wmf',
'excel.xlsx',
'excel.xlsx-extract/[Content_Types].xml',
'excel.xlsx-extract/docProps/app.xml',
'excel.xlsx-extract/docProps/core.xml',
'excel.xlsx-extract/_rels/.rels',
'excel.xlsx-extract/xl/workbook.xml',
'excel.xlsx-extract/xl/worksheets/sheet2.xml',
'excel.xlsx-extract/xl/worksheets/sheet3.xml',
'excel.xlsx-extract/xl/worksheets/sheet1.xml',
'excel.xlsx-extract/xl/styles.xml',
'excel.xlsx-extract/xl/theme/theme1.xml',
'excel.xlsx-extract/xl/_rels/workbook.xml.rels',
'excel.xlsx-extract/xl/sharedStrings.xml',
'ppt.pptx',
'ppt.pptx-extract/[Content_Types].xml',
'ppt.pptx-extract/docProps/app.xml',
'ppt.pptx-extract/docProps/thumbnail.jpeg',
'ppt.pptx-extract/docProps/core.xml',
'ppt.pptx-extract/_rels/.rels',
'ppt.pptx-extract/ppt/viewProps.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout9.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout8.xml',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout5.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout4.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout2.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout3.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout8.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout9.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout11.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout10.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout6.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout7.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/_rels/slideLayout1.xml.rels',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout3.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout2.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout1.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout5.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout4.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout6.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout10.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout11.xml',
'ppt.pptx-extract/ppt/slideLayouts/slideLayout7.xml',
'ppt.pptx-extract/ppt/presentation.xml',
'ppt.pptx-extract/ppt/slideMasters/slideMaster1.xml',
'ppt.pptx-extract/ppt/slideMasters/_rels/slideMaster1.xml.rels',
'ppt.pptx-extract/ppt/slides/slide1.xml',
'ppt.pptx-extract/ppt/slides/_rels/slide1.xml.rels',
'ppt.pptx-extract/ppt/theme/theme1.xml',
'ppt.pptx-extract/ppt/_rels/presentation.xml.rels',
'ppt.pptx-extract/ppt/presProps.xml',
'ppt.pptx-extract/ppt/tableStyles.xml',
'ppt.pptx-extract/ppt/media/image1.png'
]
check_files(test_dir, expected)
check_no_error(result)
def touch(self, location):
with | |
output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc == buttap:
z, p, k = typefunc(N)
elif typefunc == besselap:
z, p, k = typefunc(N, norm=bessel_norms[ftype])
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to the requested output form (pole-zero, numerator-denominator, or second-order sections)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``2*fs*(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
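# A minimal numeric sketch (added helper, not part of the original module):
# checks the bilinear mapping above on a single analog pole, using only the
# functions defined in this file.
def _bilinear_single_pole_example():
    # Analog prototype 1 / (s + 1): no zeros, one pole at s = -1, gain 1,
    # with the fs = 2.0 convention that iirfilter uses after prewarping.
    z, p, k = _zpkbilinear(z=[], p=[-1.0], k=1.0, fs=2.0)
    # p == [(4 - 1) / (4 + 1)] == [0.6], inside the unit circle (stable);
    # the zero that was at infinity lands at z = -1 (Nyquist), and k == 0.2.
    return z, p, k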
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
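# Hedged illustration (added helper, not in the original module): the LP->HP
# inversion above moves the prototype pole radially and compensates the gain so
# that the highpass gain at s -> infinity equals the prototype's DC gain.
def _lp2hp_single_pole_example(wo=10.0):
    # Single-pole lowpass prototype 1 / (s + 1): no zeros, pole at -1, gain 1.
    z, p, k = _zpklp2hp(z=[], p=[-1.0], k=1.0, wo=wo)
    # z == [0.0] (the zero from infinity lands at the origin),
    # p == [-wo], and k == 1.0 since prod(-z)/prod(-p) == 1 for this prototype.
    return z, p, k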
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp | |
import functools
import hashlib
import logging
import os
import shutil
import sys
from io import BytesIO
from shutil import copy, rmtree
from tempfile import mkdtemp
import pytest
from mock import Mock, patch
import pip
from pip._internal.download import (
CI_ENVIRONMENT_VARIABLES,
MultiDomainBasicAuth,
PipSession,
SafeFileCache,
_copy_source_tree,
_download_http_url,
_get_url_scheme,
parse_content_disposition,
sanitize_content_filename,
unpack_file_url,
unpack_http_url,
url_to_path,
)
from pip._internal.exceptions import HashMismatch
from pip._internal.models.link import Link
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.misc import path_to_url
from tests.lib import create_file
from tests.lib.filesystem import (
get_filelist,
make_socket_file,
make_unreadable_file,
)
from tests.lib.path import Path
@pytest.fixture(scope="function")
def cache_tmpdir(tmpdir):
cache_dir = tmpdir.joinpath("cache")
cache_dir.mkdir(parents=True)
yield cache_dir
def test_unpack_http_url_with_urllib_response_without_content_type(data):
"""
It should download and unpack files even if no Content-Type header exists
"""
_real_session = PipSession()
def _fake_session_get(*args, **kwargs):
resp = _real_session.get(*args, **kwargs)
del resp.headers["Content-Type"]
return resp
session = Mock()
session.get = _fake_session_get
uri = path_to_url(data.packages.joinpath("simple-1.0.tar.gz"))
link = Link(uri)
temp_dir = mkdtemp()
try:
unpack_http_url(
link,
temp_dir,
download_dir=None,
session=session,
)
assert set(os.listdir(temp_dir)) == {
'PKG-INFO', 'setup.cfg', 'setup.py', 'simple', 'simple.egg-info'
}
finally:
rmtree(temp_dir)
def get_user_agent():
return PipSession().headers["User-Agent"]
def test_user_agent():
user_agent = get_user_agent()
assert user_agent.startswith("pip/%s" % pip.__version__)
@pytest.mark.parametrize('name, expected_like_ci', [
('BUILD_BUILDID', True),
('BUILD_ID', True),
('CI', True),
('PIP_IS_CI', True),
# Test a prefix substring of one of the variable names we use.
('BUILD', False),
])
def test_user_agent__ci(monkeypatch, name, expected_like_ci):
# Delete the variable names we use to check for CI to prevent the
# detection from always returning True in case the tests are being run
# under actual CI. It is okay to depend on CI_ENVIRONMENT_VARIABLES
# here (part of the code under test) because this setup step can only
# prevent false test failures. It can't cause a false test passage.
for ci_name in CI_ENVIRONMENT_VARIABLES:
monkeypatch.delenv(ci_name, raising=False)
# Confirm the baseline before setting the environment variable.
user_agent = get_user_agent()
assert '"ci":null' in user_agent
assert '"ci":true' not in user_agent
monkeypatch.setenv(name, 'true')
user_agent = get_user_agent()
assert ('"ci":true' in user_agent) == expected_like_ci
assert ('"ci":null' in user_agent) == (not expected_like_ci)
def test_user_agent_user_data(monkeypatch):
monkeypatch.setenv("PIP_USER_AGENT_USER_DATA", "some_string")
assert "some_string" in PipSession().headers["User-Agent"]
class FakeStream(object):
def __init__(self, contents):
self._io = BytesIO(contents)
def read(self, size, decode_content=None):
return self._io.read(size)
def stream(self, size, decode_content=None):
yield self._io.read(size)
def release_conn(self):
pass
class MockResponse(object):
def __init__(self, contents):
self.raw = FakeStream(contents)
self.content = contents
self.request = None
self.status_code = 200
self.connection = None
self.url = None
self.headers = {}
self.history = []
def raise_for_status(self):
pass
class MockConnection(object):
def _send(self, req, **kwargs):
raise NotImplementedError("_send must be overridden for tests")
def send(self, req, **kwargs):
resp = self._send(req, **kwargs)
for cb in req.hooks.get("response", []):
cb(resp)
return resp
class MockRequest(object):
def __init__(self, url):
self.url = url
self.headers = {}
self.hooks = {}
def register_hook(self, event_name, callback):
self.hooks.setdefault(event_name, []).append(callback)
@patch('pip._internal.download.unpack_file')
def test_unpack_http_url_bad_downloaded_checksum(mock_unpack_file):
"""
If already-downloaded file has bad checksum, re-download.
"""
base_url = 'http://www.example.com/somepackage.tgz'
contents = b'downloaded'
download_hash = hashlib.new('sha1', contents)
link = Link(base_url + '#sha1=' + download_hash.hexdigest())
session = Mock()
session.get = Mock()
response = session.get.return_value = MockResponse(contents)
response.headers = {'content-type': 'application/x-tar'}
response.url = base_url
download_dir = mkdtemp()
try:
downloaded_file = os.path.join(download_dir, 'somepackage.tgz')
create_file(downloaded_file, 'some contents')
unpack_http_url(
link,
'location',
download_dir=download_dir,
session=session,
hashes=Hashes({'sha1': [download_hash.hexdigest()]})
)
# despite existence of downloaded file with bad hash, downloaded again
session.get.assert_called_once_with(
'http://www.example.com/somepackage.tgz',
headers={"Accept-Encoding": "identity"},
stream=True,
)
# cached file is replaced with newly downloaded file
with open(downloaded_file) as fh:
assert fh.read() == 'downloaded'
finally:
rmtree(download_dir)
@pytest.mark.parametrize("filename, expected", [
('dir/file', 'file'),
('../file', 'file'),
('../../file', 'file'),
('../', ''),
('../..', '..'),
('/', ''),
])
def test_sanitize_content_filename(filename, expected):
"""
Test inputs where the result is the same for Windows and non-Windows.
"""
assert sanitize_content_filename(filename) == expected
@pytest.mark.parametrize("filename, win_expected, non_win_expected", [
('dir\\file', 'file', 'dir\\file'),
('..\\file', 'file', '..\\file'),
('..\\..\\file', 'file', '..\\..\\file'),
('..\\', '', '..\\'),
('..\\..', '..', '..\\..'),
('\\', '', '\\'),
])
def test_sanitize_content_filename__platform_dependent(
filename,
win_expected,
non_win_expected
):
"""
Test inputs where the result is different for Windows and non-Windows.
"""
if sys.platform == 'win32':
expected = win_expected
else:
expected = non_win_expected
assert sanitize_content_filename(filename) == expected
@pytest.mark.parametrize("content_disposition, default_filename, expected", [
('attachment;filename="../file"', 'df', 'file'),
])
def test_parse_content_disposition(
content_disposition,
default_filename,
expected
):
actual = parse_content_disposition(content_disposition, default_filename)
assert actual == expected
def test_download_http_url__no_directory_traversal(tmpdir):
"""
Test that directory traversal doesn't happen on download when the
Content-Disposition header contains a filename with a ".." path part.
"""
mock_url = 'http://www.example.com/whatever.tgz'
contents = b'downloaded'
link = Link(mock_url)
session = Mock()
resp = MockResponse(contents)
resp.url = mock_url
resp.headers = {
# Set the content-type to a random value to prevent
# mimetypes.guess_extension from guessing the extension.
'content-type': 'random',
'content-disposition': 'attachment;filename="../out_dir_file"'
}
session.get.return_value = resp
download_dir = tmpdir.joinpath('download')
os.mkdir(download_dir)
file_path, content_type = _download_http_url(
link,
session,
download_dir,
hashes=None,
progress_bar='on',
)
# The file should be downloaded to download_dir.
actual = os.listdir(download_dir)
assert actual == ['out_dir_file']
@pytest.mark.parametrize("url,expected", [
('http://localhost:8080/', 'http'),
('file:c:/path/to/file', 'file'),
('file:/dev/null', 'file'),
('', None),
])
def test__get_url_scheme(url, expected):
assert _get_url_scheme(url) == expected
@pytest.mark.parametrize("url,win_expected,non_win_expected", [
('file:tmp', 'tmp', 'tmp'),
('file:c:/path/to/file', r'C:\path\to\file', 'c:/path/to/file'),
('file:/path/to/file', r'\path\to\file', '/path/to/file'),
('file://localhost/tmp/file', r'\tmp\file', '/tmp/file'),
('file://localhost/c:/tmp/file', r'C:\tmp\file', '/c:/tmp/file'),
('file://somehost/tmp/file', r'\\somehost\tmp\file', None),
('file:///tmp/file', r'\tmp\file', '/tmp/file'),
('file:///c:/tmp/file', r'C:\tmp\file', '/c:/tmp/file'),
])
def test_url_to_path(url, win_expected, non_win_expected):
if sys.platform == 'win32':
expected_path = win_expected
else:
expected_path = non_win_expected
if expected_path is None:
with pytest.raises(ValueError):
url_to_path(url)
else:
assert url_to_path(url) == expected_path
@pytest.mark.skipif("sys.platform != 'win32'")
def test_url_to_path_path_to_url_symmetry_win():
path = r'C:\tmp\file'
assert url_to_path(path_to_url(path)) == path
unc_path = r'\\unc\share\path'
assert url_to_path(path_to_url(unc_path)) == unc_path
@pytest.fixture
def clean_project(tmpdir_factory, data):
tmpdir = Path(str(tmpdir_factory.mktemp("clean_project")))
new_project_dir = tmpdir.joinpath("FSPkg")
path = data.packages.joinpath("FSPkg")
shutil.copytree(path, new_project_dir)
return new_project_dir
def test_copy_source_tree(clean_project, tmpdir):
target = tmpdir.joinpath("target")
expected_files = get_filelist(clean_project)
assert len(expected_files) == 3
_copy_source_tree(clean_project, target)
copied_files = get_filelist(target)
assert expected_files == copied_files
@pytest.mark.skipif("sys.platform == 'win32' or sys.version_info < (3,)")
def test_copy_source_tree_with_socket(clean_project, tmpdir, caplog):
target = tmpdir.joinpath("target")
expected_files = get_filelist(clean_project)
socket_path = str(clean_project.joinpath("aaa"))
make_socket_file(socket_path)
_copy_source_tree(clean_project, target)
copied_files = get_filelist(target)
assert expected_files == copied_files
# Warning should have been logged.
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelname == 'WARNING'
assert socket_path in record.message
@pytest.mark.skipif("sys.platform == 'win32' or sys.version_info < (3,)")
def test_copy_source_tree_with_socket_fails_with_no_socket_error(
clean_project, tmpdir
):
target = tmpdir.joinpath("target")
expected_files = get_filelist(clean_project)
make_socket_file(clean_project.joinpath("aaa"))
unreadable_file = clean_project.joinpath("bbb")
make_unreadable_file(unreadable_file)
with pytest.raises(shutil.Error) as e:
_copy_source_tree(clean_project, target)
errored_files = [err[0] for err in e.value.args[0]]
assert len(errored_files) == 1
assert unreadable_file in errored_files
copied_files = get_filelist(target)
# All files without errors should have been copied.
assert expected_files == copied_files
def test_copy_source_tree_with_unreadable_dir_fails(clean_project, tmpdir):
target = tmpdir.joinpath("target")
expected_files = get_filelist(clean_project)
unreadable_file = clean_project.joinpath("bbb")
make_unreadable_file(unreadable_file)
with pytest.raises(shutil.Error) as e:
_copy_source_tree(clean_project, target)
errored_files = [err[0] for err in e.value.args[0]]
assert len(errored_files) == 1
assert unreadable_file in errored_files
copied_files = get_filelist(target)
# All files without errors should have been copied.
assert expected_files == copied_files
class Test_unpack_file_url(object):
def prep(self, tmpdir, data):
self.build_dir = tmpdir.joinpath('build')
self.download_dir = tmpdir.joinpath('download')
os.mkdir(self.build_dir)
os.mkdir(self.download_dir)
self.dist_file = "simple-1.0.tar.gz"
self.dist_file2 = "simple-2.0.tar.gz"
self.dist_path = data.packages.joinpath(self.dist_file)
self.dist_path2 = data.packages.joinpath(self.dist_file2)
self.dist_url = Link(path_to_url(self.dist_path))
self.dist_url2 = Link(path_to_url(self.dist_path2))
def test_unpack_file_url_no_download(self, tmpdir, data):
self.prep(tmpdir, data)
unpack_file_url(self.dist_url, self.build_dir)
assert os.path.isdir(os.path.join(self.build_dir, 'simple'))
assert not os.path.isfile(
os.path.join(self.download_dir, self.dist_file))
def test_unpack_file_url_and_download(self, tmpdir, data):
self.prep(tmpdir, data)
unpack_file_url(self.dist_url, self.build_dir,
download_dir=self.download_dir)
assert os.path.isdir(os.path.join(self.build_dir, 'simple'))
assert os.path.isfile(os.path.join(self.download_dir, self.dist_file))
def test_unpack_file_url_download_already_exists(self, tmpdir,
data, monkeypatch):
self.prep(tmpdir, data)
# add in previous download (copy simple-2.0 as simple-1.0)
# so we can tell it didn't get overwritten
dest_file = os.path.join(self.download_dir, self.dist_file)
copy(self.dist_path2, dest_file)
with open(self.dist_path2, 'rb') as f:
dist_path2_md5 = hashlib.md5(f.read()).hexdigest()
unpack_file_url(self.dist_url, self.build_dir,
download_dir=self.download_dir)
# our hash should be the same, i.e. not overwritten by simple-1.0 hash
with open(dest_file, 'rb') as f:
assert dist_path2_md5 == hashlib.md5(f.read()).hexdigest()
def test_unpack_file_url_bad_hash(self, tmpdir, data,
monkeypatch):
"""
Test when the file url hash fragment is wrong
"""
self.prep(tmpdir, data)
url = '{}#md5=bogus'.format(self.dist_url.url)
dist_url = Link(url)
with pytest.raises(HashMismatch):
unpack_file_url(dist_url,
self.build_dir,
hashes=Hashes({'md5': ['bogus']}))
def test_unpack_file_url_download_bad_hash(self, tmpdir, data,
monkeypatch):
"""
Test when existing download has different hash from the file url
fragment
"""
self.prep(tmpdir, data)
# add in previous download (copy simple-2.0 as simple-1.0 so it's wrong
# hash)
dest_file = os.path.join(self.download_dir, self.dist_file)
copy(self.dist_path2, dest_file)
with open(self.dist_path, 'rb') as f:
dist_path_md5 = hashlib.md5(f.read()).hexdigest()
with open(dest_file, 'rb') as f:
dist_path2_md5 = hashlib.md5(f.read()).hexdigest()
assert dist_path_md5 != dist_path2_md5
url = '{}#md5={}'.format(self.dist_url.url, dist_path_md5)
dist_url = Link(url)
unpack_file_url(dist_url, self.build_dir,
download_dir=self.download_dir,
hashes=Hashes({'md5': [dist_path_md5]}))
        # confirm hash is for simple-1.0
# the previous bad download has been removed
with open(dest_file, 'rb') as f:
assert hashlib.md5(f.read()).hexdigest() == dist_path_md5
def test_unpack_file_url_thats_a_dir(self, tmpdir, data):
self.prep(tmpdir, data)
dist_path = data.packages.joinpath("FSPkg")
dist_url = Link(path_to_url(dist_path))
unpack_file_url(dist_url, self.build_dir,
download_dir=self.download_dir)
assert os.path.isdir(os.path.join(self.build_dir, 'fspkg'))
@pytest.mark.parametrize('exclude_dir', [
'.nox',
'.tox'
])
def test_unpack_file_url_excludes_expected_dirs(tmpdir, exclude_dir):
src_dir = tmpdir / 'src'
dst_dir = tmpdir / 'dst'
src_included_file = src_dir.joinpath('file.txt')
src_excluded_dir = src_dir.joinpath(exclude_dir)
src_excluded_file = src_dir.joinpath(exclude_dir, 'file.txt')
src_included_dir = src_dir.joinpath('subdir', exclude_dir)
# set up source directory
src_excluded_dir.mkdir(parents=True)
src_included_dir.mkdir(parents=True)
src_included_file.touch()
src_excluded_file.touch()
dst_included_file = dst_dir.joinpath('file.txt')
dst_excluded_dir = dst_dir.joinpath(exclude_dir)
    dst_excluded_file = dst_dir.joinpath(exclude_dir, 'file.txt')
p1Five +=1
# else:
# p2Five +=1
# #Five to the right
# if j < 11:
# if state[i][j] == state[i][j+1] == state[i][j+2] == state[i][j+3] == state[i][j+4]:
# if state[i][j] == p1Marker:
# p1Five +=1
# else:
# p2Five +=1
# #Five to the upper right
# if i > 3 and j < 11:
# if state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3] == state[i-4][j+4]:
# if state[i][j] == p1Marker:
# p1Five +=1
# else:
# p2Five +=1
# #Five to the lower right
# if i < 11 and j < 11:
# if state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3] == state[i+4][j+4]:
# if state[i][j] == p1Marker:
# p1Five +=1
# else:
# p2Five +=1
# #OPEN FOURS
# if i > 0 and i < 11:
# #Open Four to the bottom
# if state[i][j] == state[i+1][j] == state[i+2][j] == state[i+3][j] and state[i-1][j] not in MARKERS and state[i+4][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenFour += 1
# else:
# p2OpenFour += 1
# if j < 11 and j > 0:
# #Open Four to the right
# if state[i][j] == state[i][j+1] == state[i][j+2] == state[i][j+3] and state[i][j-1] not in MARKERS and state[i][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenFour += 1
# else:
# p2OpenFour += 1
# #Open Four to the upper right
# if i > 3 and i < 14:
# if state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3] and state[i+1][j-1] not in MARKERS and state[i-4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenFour += 1
# else:
# p2OpenFour += 1
# #Open Four to the lower right
# if i < 11 and i > 0:
# if state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3] and state[i-1][j-1] not in MARKERS and state[i+4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenFour += 1
# else:
# p2OpenFour += 1
# #BROKEN OPEN FOURS
# if i < 10 and i > 0:
# #Broken Open Four to the bottom
# if (state[i][j] == state[i+2][j] == state[i+3][j] == state[i+4][j] and state[i+1][j] not in MARKERS) or (state[i][j] == state[i+1][j] == state[i+3][j] == state[i+4][j] and state[i+2][j] not in MARKERS) or (state[i][j] == state[i+1][j] == state[i+2][j] == state[i+4][j] and state[i+3][j] not in MARKERS) and state[i-1][j] not in MARKERS and state[i+5][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1BOpenFour += 1
# else:
# p2BOpenFour += 1
# if j < 10:
# #Broken Open four to the right
# if (state[i][j] == state[i][j+2] == state[i][j+3] == state[i][j+4] and state[i][j+1] not in MARKERS) or (state[i][j] == state[i][j+1] == state[i][j+3] == state[i][j+4] and state[i][j+2] not in MARKERS) or (state[i][j] == state[i][j+1] == state[i][j+2] == state[i][j+4] and state[i][j+3] not in MARKERS) and state[i][j-1] not in MARKERS and state[i][j+5] not in MARKERS:
# if state[i][j] == p1Marker:
# p1BOpenFour += 1
# else:
# p2BOpenFour += 1
# #Broken Open four to the upper right
# if i > 4 and i < 14:
# if (state[i][j] == state[i-2][j+2] == state[i-3][j+3] == state[i-4][j+4] and state[i-1][j+1] not in MARKERS) or (state[i][j] == state[i-1][j+1] == state[i-3][j+3] == state[i-4][j+4] and state[i-2][j+2] not in MARKERS) or (state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-4][j+4] and state[i-3][j+3] not in MARKERS) and state[i+1][j-1] not in MARKERS and state[i-5][j+5] not in MARKERS:
# if state[i][j] == p1Marker:
# p1BOpenFour += 1
# else:
# p2BOpenFour += 1
# #Broken Open four to the lower right
# if i < 10 and i > 0:
# if (state[i][j] == state[i+2][j+2] == state[i+3][j+3] == state[i+4][j+4] and state[i+1][j+1] not in MARKERS) or (state[i][j] == state[i+1][j+1] == state[i+3][j+3] == state[i+4][j+4] and state[i+2][j+2] not in MARKERS) or (state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+4][j+4] and state[i+3][j+3] not in MARKERS) and state[i-1][j-1] not in MARKERS and state[i+5][j+5] not in MARKERS:
# if state[i][j] == p1Marker:
# p1BOpenFour += 1
# else:
# p2BOpenFour += 1
# #HALF OPEN FOURS
# if i < 12:
# #Half Open Four to the bottom
# if state[i][j] == state[i+1][j] == state[i+2][j] == state[i+3][j]:
# if i is 0:
# if state[i+4][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# if i is 11:
# if state[i-1][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# else:
# if (state[i-1][j] not in MARKERS and state[i+4][j] in MARKERS) or (state[i-1][j] in MARKERS and state[i+4][j] not in MARKERS):
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# if j < 12:
# #Half Open Four to the right
# if state[i][j] == state[i][j+1] == state[i][j+2] == state[i][j+3]:
# if j is 0:
# if state[i][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# if j is 11:
# if state[i][j-1] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# else:
# if (state[i][j-1] not in MARKERS and state[i][j+4] in MARKERS) or (state[i][j-1] in MARKERS and state[i][j+4] not in MARKERS):
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# if i > 2:
# #Half Open Four to the upper right
# if state[i][j] == state[i-1][j+1] == state[i-2][j+2] == state[i-3][j+3]:
# if j is 0:
# if i > 3:
# if state[i-4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif j is 11:
# if i < 14:
# if state[i+1][j-1] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif i is 14:
# if j < 11:
# if state[i-4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif i is 3:
# if j > 0:
# if state[i+1][j-1] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# else:
# if (state[i+1][j-1] not in MARKERS and state[i-4][j+4] in MARKERS) or (state[i+1][j-1] in MARKERS and state[i-4][j+4] not in MARKERS):
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# if i < 12:
# #Half Open Four to the lower right
# if state[i][j] == state[i+1][j+1] == state[i+2][j+2] == state[i+3][j+3]:
# if j is 0:
# if i < 11:
# if state[i+4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif j is 11:
# if i > 0:
# if state[i-1][j-1] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif i is 0:
# if j < 11:
# if state[i+4][j+4] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# elif i is 11:
# if j > 0:
# if state[i-1][j-1] not in MARKERS:
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# else:
# if (state[i-1][j-1] not in MARKERS and state[i+4][j+4] in MARKERS) or (state[i-1][j-1] in MARKERS and state[i+4][j+4] not in MARKERS):
# if state[i][j] == p1Marker:
# p1HOpenFour += 1
# else:
# p2HOpenFour += 1
# #OPEN THREES
# if i > 0 and i < 12:
# #Open Three to the bottom
# if state[i][j] == state[i+1][j] == state[i+2][j] and state[i-1][j] not in MARKERS and state[i+3][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenThree += 1
# else:
# p2OpenThree += 1
# if j < 12 and j > 0:
# #Open Three to the right
# if state[i][j] == state[i][j+1] == state[i][j+2] and state[i][j-1] not in MARKERS and state[i][j+3] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenThree += 1
# else:
# p2OpenThree += 1
# #Open Three to the upper right
# if i > 2 and i < 14:
# if state[i][j] == state[i-1][j+1] == state[i-2][j+2] and state[i+1][j-1] not in MARKERS and state[i-3][j+3] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenThree += 1
# else:
# p2OpenThree += 1
# #Open Three to the lower right
# if i < 12 and i > 0:
# if state[i][j] == state[i+1][j+1] == state[i+2][j+2] and state[i-1][j-1] not in MARKERS and state[i+3][j+3] not in MARKERS:
# if state[i][j] == p1Marker:
# p1OpenThree += 1
# else:
# p2OpenThree += 1
# #BROKEN OPEN THREES
# if i < 11 and i > 0:
# #Broken Open Three to the bottom
# if (state[i][j] == state[i+2][j] == state[i+3][j] and state[i+1][j] not in MARKERS) or (state[i][j] == state[i+1][j] == state[i+3][j] and state[i+2][j] not in MARKERS) and state[i-1][j] not in MARKERS and state[i+4][j] not in MARKERS:
# if state[i][j] == p1Marker:
# p1BOpenThree += 1
# else:
# p2BOpenThree += 1
# if j < 11 and j > 0:
# #Broken Open Three to the right
# if (state[i][j] == state[i][j+2] == state[i][j+3] and state[i][j+1] | |
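# Editor's note: the commented-out checks above repeat the same scan for every
# direction and pattern length.  A hedged sketch of an equivalent
# direction-vector helper (the name is hypothetical; it assumes the same
# `state` board and MARKERS set used above):
#
#   def count_run(state, i, j, di, dj, length):
#       """True if `length` equal player markers start at (i, j) along (di, dj)."""
#       marker = state[i][j]
#       if marker not in MARKERS:
#           return False
#       for step in range(1, length):
#           r, c = i + di * step, j + dj * step
#           if not (0 <= r < len(state) and 0 <= c < len(state[r])):
#               return False
#           if state[r][c] != marker:
#               return False
#       return True
#
#   # e.g. "five to the lower right" from (i, j): count_run(state, i, j, 1, 1, 5)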
from collections.abc import Callable, Iterable, Mapping
from neuwon.database import memory_spaces
from neuwon.database.database import DB_Class, DB_Object, Database
from neuwon.database.doc import Documentation
from neuwon.database.dtypes import *
import cupy
import numpy as np
class DataComponent(Documentation):
""" Abstract class for all types of data storage. """
def __init__(self, db_class, name,
doc, units, shape, dtype, initial_value, allow_invalid, valid_range):
Documentation.__init__(self, name, doc)
assert isinstance(db_class, DB_Class)
assert self.name not in db_class.components
assert self.name not in db_class.methods
self.db_class = db_class
self.db_class.components[self.name] = self
self.qualname = f'{self.db_class.name}.{self.name}'
if shape is None: pass
elif isinstance(shape, Iterable):
self.shape = tuple(round(x) for x in shape)
else:
self.shape = (round(shape),)
if isinstance(dtype, type) and issubclass(dtype, DB_Object):
dtype = dtype.get_database_class()
if isinstance(dtype, str) or isinstance(dtype, DB_Class):
self.dtype = Pointer
self.initial_value = NULL
self.reference = self.db_class.database.get_class(dtype)
self.reference.referenced_by.append(self)
else:
self.dtype = np.dtype(dtype)
if initial_value is None:
self.initial_value = None
else:
self.initial_value = self.dtype.type(initial_value)
self.reference = False
self.units = str(units)
self.allow_invalid = bool(allow_invalid)
if self.reference is self.db_class: assert self.allow_invalid
self.valid_range = tuple(valid_range)
if None not in self.valid_range: self.valid_range = tuple(sorted(self.valid_range))
assert len(self.valid_range) == 2
self.memory_space = self.db_class.database.memory_space
setattr(self.db_class.instance_type, self.name,
property(self._getter_wrapper, self._setter_wrapper, doc=self.doc,))
def _getter_wrapper(self, instance):
if instance._idx == NULL:
raise ValueError("Object was destroyed!")
return self._getter(instance)
def _setter_wrapper(self, instance, value):
if instance._idx == NULL:
raise ValueError("Object was destroyed!")
self._setter(instance, value)
def _getter(self, instance):
""" Get the instance's data value. """
raise NotImplementedError(type(self))
def _setter(self, instance, value):
""" Set the instance's data value. """
raise NotImplementedError(type(self))
def get_data(self):
""" Returns all data for this component. """
raise NotImplementedError(type(self))
def set_data(self, value):
""" Replace this entire data component with a new set of values. """
raise NotImplementedError(type(self))
def get_units(self) -> str: return self.units
def get_dtype(self) -> np.dtype: return self.dtype
def get_shape(self) -> tuple: return self.shape
def get_initial_value(self) -> object: return self.initial_value
def get_class(self) -> DB_Class: return self.db_class
def get_database(self) -> Database: return self.db_class.database
def get_memory_space(self) -> str:
"""
Returns the current location of this data component:
Returns "host" when located in python's memory space.
Returns "cuda" when located in CUDA's memory space.
"""
return self.memory_space
def free(self):
"""
Release the memory used by this data component. The next time the data
is accessed it will be reallocated and set to its initial_value.
"""
raise NotImplementedError(type(self))
def is_free(self):
return self.data is None
def check(self):
""" Check data for values which are: NaN, NULL, or Out of bounds. """
data = self.get_data()
if isinstance(self, ClassAttribute): data = np.array([data])
elif isinstance(self, SparseMatrix): data = data.data
xp = cupy.get_array_module(data)
if not self.allow_invalid:
if self.reference:
assert xp.all(xp.less(data, self.reference.size)), self.name + " is NULL"
else:
if data.dtype.kind in ("f", "c"):
assert not xp.any(xp.isnan(data)), self.name + " is NaN"
lower_bound, upper_bound = self.valid_range
if lower_bound is not None:
assert xp.all(xp.less_equal(lower_bound, data)), self.name + " less than %g"%lower_bound
if upper_bound is not None:
assert xp.all(xp.greater_equal(upper_bound, data)), self.name + " greater than %g"%upper_bound
def _remove_references_to_destroyed(self):
raise NotImplementedError(type(self))
def _type_info(self):
s = ""
if self.reference: s += "ref:" + self.reference.name
else: s += self.dtype.name
if self.shape != (1,): s += repr(list(self.shape))
return s
def __repr__(self):
return "<%s: %s.%s %s>"%(type(self).__name__, self.db_class.name, self.name, self._type_info())
_units_doc = """
Argument units is an optional documentation string for physical units.
"""
_dtype_doc = """
Argument dtype is the data type for this data component. It is either:
* An instance of numpy.dtype
* A DB_Class or its name, to make pointers to instances of that class.
"""
_shape_doc = """
Argument shape is the allocation size / shape for this data component.
"""
_allow_invalid_doc = """
Argument allow_invalid controls whether NaN or NULL values are permissible.
"""
_valid_range_doc = """
Argument valid_range is pair of numbers (min, max) defining an inclusive
range of permissible values.
"""
class Attribute(DataComponent):
""" This is the database's internal representation of an instance variable. """
def __init__(self, db_class, name:str, initial_value=None, dtype=Real, shape=(1,),
doc:str="", units:str="", allow_invalid:bool=False, valid_range=(None, None),):
""" Add an instance variable to a class type.
Argument initial_value is written to new instances of this attribute.
This is applied before "base_class.__init__" is called.
Optional, if not given then the data will not be initialized.
"""
DataComponent.__init__(self, db_class, name,
doc=doc, units=units, dtype=dtype, shape=shape, initial_value=initial_value,
allow_invalid=allow_invalid, valid_range=valid_range)
self.data = self._alloc(len(self.db_class))
if self.initial_value is not None:
self.data.fill(self.initial_value)
__init__.__doc__ += "".join((
DataComponent._dtype_doc,
DataComponent._shape_doc,
Documentation._doc_doc,
DataComponent._units_doc,
DataComponent._allow_invalid_doc,
DataComponent._valid_range_doc,))
def free(self):
self.data = None
def _alloc_if_free(self):
if self.data is None:
self.data = self._alloc(len(self.db_class))
if self.initial_value is not None:
self.data.fill(self.initial_value)
def _getter(self, instance):
if self.data is None: return self.initial_value
value = self.data[instance._idx]
if hasattr(value, 'get'): value = value.get()
if self.reference:
return self.reference.index_to_object(value)
return self.dtype.type(value)
def _setter(self, instance, value):
self._alloc_if_free()
if self.reference:
if value is None:
value = NULL
else:
assert isinstance(value, self.reference.instance_type)
value = value._idx
self.data[instance._idx] = value
def _append(self, old_size, new_size):
""" Prepare space for new instances at the end of the array. """
if self.data is None: return
if len(self.data) < new_size:
new_data = self._alloc(2 * new_size)
new_data[:old_size] = self.data[:old_size]
self.data = new_data
if self.initial_value is not None:
self.data[old_size:new_size].fill(self.initial_value)
def _alloc(self, size):
""" Returns an empty array. """
# TODO: IIRC CuPy can not deal with numpy structured arrays...
# Detect this issue and revert to using numba arrays.
# numba.cuda.to_device(numpy.array(data, dtype=dtype))
shape = (size,)
if self.shape != (1,): # Don't append empty trailing dimension.
shape += self.shape
return self.memory_space.array_module.empty(shape, dtype=self.dtype)
def _transfer(self, target_space):
if self.data is None:
self.memory_space = target_space
elif self.memory_space is not target_space:
if self.memory_space is memory_spaces.host:
if target_space is memory_spaces.cuda:
self.data = memory_spaces.cuda.array(self.data)
else: raise NotImplementedError(target_space)
elif self.memory_space is memory_spaces.cuda:
if target_space is memory_spaces.host:
self.data = self.data.get()
else: raise NotImplementedError(target_space)
else: raise NotImplementedError(self.memory_space)
self.memory_space = target_space
def get_data(self):
""" Returns either "numpy.ndarray" or "cupy.ndarray" """
self._transfer(self.db_class.database.memory_space)
self._alloc_if_free()
return self.data[:len(self.db_class)]
def set_data(self, value):
size = len(self.db_class)
assert len(value) == size
if self.shape == (1,):
shape = (size,) # Don't append empty trailing dimension.
else:
shape = (size,) + self.shape
# TODO: This should accept whatever memory space it is given, and avoid
        # transferring until someone calls "get_data".
self.data = self.memory_space.array(value, dtype=self.dtype).reshape(shape)
def _remove_references_to_destroyed(self):
if not self.reference: return
if self.data is None: return
pointer_data = self.data[:len(self.db_class)]
destroyed_mask = self.reference.destroyed_mask
if destroyed_mask is None: return
xp = cupy.get_array_module(destroyed_mask)
target_is_dead = xp.take(destroyed_mask, pointer_data, axis=0, mode='clip')
target_is_dead[pointer_data == NULL] = True
target_is_dead = xp.nonzero(target_is_dead)[0]
pointer_data[target_is_dead] = NULL
if not self.allow_invalid:
for idx in target_is_dead:
db_obj = self.db_class.index_to_object(idx)
if db_obj is not None:
db_obj.destroy()
destroyed_mask[idx] = True
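# Editor's sketch (hypothetical usage, not part of the module): adding an
# Attribute installs a property on db_class.instance_type (see
# DataComponent.__init__ above), so instances read and write the packed
# array transparently.  The name "voltage" and the instance construction
# below are made up for illustration only.
#
#   Attribute(db_class, "voltage", initial_value=-70.0, units="mV")
#   obj = db_class.instance_type()      # hypothetical construction
#   obj.voltage                         # _getter: reads self.data[obj._idx]
#   obj.voltage = -55.0                 # _setter: writes self.data[obj._idx]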
class ClassAttribute(DataComponent):
""" This is the database's internal representation of a class variable. """
def __init__(self, db_class, name:str, initial_value,
dtype=Real, shape=(1,),
doc:str="", units:str="",
allow_invalid:bool=False, valid_range=(None, None),):
""" Add a class variable to a class type.
All instance of the class will use a single shared value for this attribute.
Argument initial_value is required.
"""
DataComponent.__init__(self, db_class, name,
dtype=dtype, shape=shape, doc=doc, units=units, initial_value=initial_value,
allow_invalid=allow_invalid, valid_range=valid_range)
self.data = self.initial_value
self.memory_space = memory_spaces.host
if self.reference: raise NotImplementedError
__init__.__doc__ += "".join((
DataComponent._dtype_doc,
DataComponent._shape_doc,
Documentation._doc_doc,
DataComponent._units_doc,
DataComponent._allow_invalid_doc,
DataComponent._valid_range_doc,
))
def _getter(self, instance):
return self.data
def _setter(self, instance, value):
self.data = self.dtype.type(value)
def get_data(self):
return self.data
def set_data(self, value):
self.data = self.dtype.type(value)
def free(self):
self.data = self.initial_value
def _remove_references_to_destroyed(self):
pass
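# Editor's note (illustrative only): unlike Attribute, which keeps one value
# per instance in an array indexed by obj._idx, ClassAttribute holds a single
# scalar shared by every instance, so its _setter ignores which instance
# performed the write.  Hypothetical example:
#
#   ClassAttribute(db_class, "temperature", 37.0, units="C")
#   a.temperature = 40.0    # any other instance b now also reads 40.0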
class SparseMatrix(DataComponent):
""" """ # TODO-DOC
# TODO: Consider adding more write methods:
# 1) Write rows. (done)
# 2) Insert coordinates.
# Notes: first convert format to either lil or coo
# 3) Overwrite the matrix. (done)
# TODO: Figure out if/when to call mat.eliminate_zeros() and sort too.
def __init__(self, db_class, name, column, dtype=Real, doc:str="", units:str="",
allow_invalid:bool=False, valid_range=(None, None),):
"""
Add a sparse matrix that is indexed by DB_Objects. This is useful for
implementing any-to-any connections between entities.
This db_class is the index for the rows of the sparse matrix.
Argument column refers to the db_class which is the index for the
columns of the sparse matrix.
"""
DataComponent.__init__(self, db_class, name,
dtype=dtype, shape=None, doc=doc, units=units, initial_value=0.,
allow_invalid=allow_invalid, valid_range=valid_range)
self.column = self.db_class.database.get_class(column)
self.column.referenced_by_matrix_columns.append(self)
self.shape = (len(self.db_class), len(self.column))
self.fmt = 'csr'
self.free()
if self.reference: raise NotImplementedError
__init__.__doc__ += "".join((
DataComponent._dtype_doc,
Documentation._doc_doc,
DataComponent._units_doc,
DataComponent._allow_invalid_doc,
DataComponent._valid_range_doc,))
@property
def _matrix_class(self):
if self.fmt == 'lil': return self.memory_space.matrix_module.lil_matrix
elif self.fmt == | |
#!/usr/bin/env vpython3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import os
import sys
import unittest
import six
# Same reasoning as below.
if six.PY3:
import urllib.error
# This script is not Python 2-compatible, but some presubmit scripts end up
# trying to parse this to find tests.
# TODO(crbug.com/1198237): Remove this once all the GPU tests, and by
# extension the presubmit scripts, are Python 3-compatible.
if six.PY3:
import unittest.mock as mock
import validate_tag_consistency
from pyfakefs import fake_filesystem_unittest
from flake_suppressor import expectations
from flake_suppressor import unittest_utils as uu
# Note for all tests in this class: We can safely check the contents of the file
# at the end despite potentially having multiple added lines because Python 3.7+
# guarantees that dictionaries remember insertion order, so there is no risk of
# the order of modification changing.
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class IterateThroughResultsForUserUnittest(fake_filesystem_unittest.TestCase):
def setUp(self):
self._new_stdout = open(os.devnull, 'w')
self.setUpPyfakefs()
# Redirect stdout since the tested function prints a lot.
self._old_stdout = sys.stdout
sys.stdout = self._new_stdout
self._input_patcher = mock.patch.object(expectations,
'PromptUserForExpectationAction')
self._input_mock = self._input_patcher.start()
self.addCleanup(self._input_patcher.stop)
self.result_map = {
'pixel_integration_test': {
'foo_test': {
tuple(['win']): ['a'],
tuple(['mac']): ['b'],
},
'bar_test': {
tuple(['win']): ['c'],
},
},
}
self.expectation_file = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
'pixel_expectations.txt')
uu.CreateFile(self, self.expectation_file)
expectation_file_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
with open(self.expectation_file, 'w') as outfile:
outfile.write(expectation_file_contents)
def tearDown(self):
sys.stdout = self._old_stdout
self._new_stdout.close()
def testIterateThroughResultsForUserIgnoreNoGroupByTags(self):
"""Tests that everything appears to function with ignore and no group."""
self._input_mock.return_value = (None, None)
expectations.IterateThroughResultsForUser(self.result_map, False)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testIterateThroughResultsForUserIgnoreGroupByTags(self):
"""Tests that everything appears to function with ignore and grouping."""
self._input_mock.return_value = (None, None)
expectations.IterateThroughResultsForUser(self.result_map, True)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testIterateThroughResultsForUserRetryNoGroupByTags(self):
"""Tests that everything appears to function with retry and no group."""
self._input_mock.return_value = ('RetryOnFailure', '')
expectations.IterateThroughResultsForUser(self.result_map, False)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
[ win ] foo_test [ RetryOnFailure ]
[ mac ] foo_test [ RetryOnFailure ]
[ win ] bar_test [ RetryOnFailure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testIterateThroughResultsForUserRetryGroupByTags(self):
"""Tests that everything appears to function with retry and grouping."""
self._input_mock.return_value = ('RetryOnFailure', 'crbug.com/1')
expectations.IterateThroughResultsForUser(self.result_map, True)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
crbug.com/1 [ win ] foo_test [ RetryOnFailure ]
crbug.com/1 [ win ] bar_test [ RetryOnFailure ]
[ mac ] some_test [ Failure ]
crbug.com/1 [ mac ] foo_test [ RetryOnFailure ]
[ android ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testIterateThroughResultsForUserFailNoGroupByTags(self):
"""Tests that everything appears to function with failure and no group."""
self._input_mock.return_value = ('Failure', 'crbug.com/1')
expectations.IterateThroughResultsForUser(self.result_map, False)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ android ] some_test [ Failure ]
crbug.com/1 [ win ] foo_test [ Failure ]
crbug.com/1 [ mac ] foo_test [ Failure ]
crbug.com/1 [ win ] bar_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testIterateThroughResultsForUserFailGroupByTags(self):
"""Tests that everything appears to function with failure and grouping."""
self._input_mock.return_value = ('Failure', '')
expectations.IterateThroughResultsForUser(self.result_map, True)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ win ] foo_test [ Failure ]
[ win ] bar_test [ Failure ]
[ mac ] some_test [ Failure ]
[ mac ] foo_test [ Failure ]
[ android ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class FindFailuresInSameConditionUnittest(unittest.TestCase):
def setUp(self):
self.result_map = {
'pixel_integration_test': {
'foo_test': {
tuple(['win']): ['a'],
tuple(['mac']): ['a', 'b'],
},
'bar_test': {
tuple(['win']): ['a', 'b', 'c'],
tuple(['mac']): ['a', 'b', 'c', 'd'],
},
},
'webgl_conformance_integration_test': {
'foo_test': {
tuple(['win']): ['a', 'b', 'c', 'd', 'e'],
tuple(['mac']): ['a', 'b', 'c', 'd', 'e', 'f'],
},
'bar_test': {
tuple(['win']): ['a', 'b', 'c', 'd', 'e', 'f', 'g'],
tuple(['mac']): ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'],
},
},
}
def testFindFailuresInSameTest(self):
other_failures = expectations.FindFailuresInSameTest(
self.result_map, 'pixel_integration_test', 'foo_test', ['win'])
self.assertEqual(other_failures, [(tuple(['mac']), 2)])
def testFindFailuresInSameConfig(self):
typ_tag_ordered_result_map = expectations._ReorderMapByTypTags(
self.result_map)
other_failures = expectations.FindFailuresInSameConfig(
typ_tag_ordered_result_map, 'pixel_integration_test', 'foo_test',
['win'])
expected_other_failures = [
('pixel_integration_test.bar_test', 3),
('webgl_conformance_integration_test.foo_test', 5),
('webgl_conformance_integration_test.bar_test', 7),
]
self.assertEqual(len(other_failures), len(expected_other_failures))
self.assertEqual(set(other_failures), set(expected_other_failures))
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class ModifyFileForResultUnittest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.expectation_file = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY, 'expectation.txt')
uu.CreateFile(self, self.expectation_file)
self._expectation_file_patcher = mock.patch.object(
expectations, 'GetExpectationFileForSuite')
self._expectation_file_mock = self._expectation_file_patcher.start()
self.addCleanup(self._expectation_file_patcher.stop)
self._expectation_file_mock.return_value = self.expectation_file
def testNoGroupByTags(self):
"""Tests that not grouping by tags appends to the end."""
expectation_file_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
"""
with open(self.expectation_file, 'w') as outfile:
outfile.write(expectation_file_contents)
expectations.ModifyFileForResult(None, 'some_test', ['win', 'win10'], '',
'Failure', False)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ win win10 ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testGroupByTagsNoMatch(self):
"""Tests that grouping by tags but finding no match appends to the end."""
expectation_file_contents = validate_tag_consistency.TAG_HEADER + """\
[ mac ] some_test [ Failure ]
"""
with open(self.expectation_file, 'w') as outfile:
outfile.write(expectation_file_contents)
expectations.ModifyFileForResult(None, 'some_test', ['win', 'win10'], '',
'Failure', True)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ mac ] some_test [ Failure ]
[ win win10 ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
def testGroupByTagsMatch(self):
"""Tests that grouping by tags and finding a match adds mid-file."""
expectation_file_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
"""
with open(self.expectation_file, 'w') as outfile:
outfile.write(expectation_file_contents)
expectations.ModifyFileForResult(None, 'foo_test', ['win', 'win10'], '',
'Failure', True)
expected_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ win ] foo_test [ Failure ]
[ mac ] some_test [ Failure ]
"""
with open(self.expectation_file) as infile:
self.assertEqual(infile.read(), expected_contents)
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class GetExpectationFileForSuiteUnittest(unittest.TestCase):
def testRegularExpectationFile(self):
"""Tests that a regular expectation file is found properly."""
expected_filepath = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
'pixel_expectations.txt')
actual_filepath = expectations.GetExpectationFileForSuite(
'pixel_integration_test', ['webgl-version-2'])
self.assertEqual(actual_filepath, expected_filepath)
def testOverrideExpectationFile(self):
"""Tests that an overridden expectation file is found properly."""
expected_filepath = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
'info_collection_expectations.txt')
actual_filepath = expectations.GetExpectationFileForSuite(
'info_collection_test', ['webgl-version-2'])
self.assertEqual(actual_filepath, expected_filepath)
def testWebGl1Conformance(self):
"""Tests that a WebGL 1 expectation file is found properly."""
expected_filepath = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
'webgl_conformance_expectations.txt')
actual_filepath = expectations.GetExpectationFileForSuite(
'webgl_conformance_integration_test', [])
self.assertEqual(actual_filepath, expected_filepath)
def testWebGl2Conformance(self):
"""Tests that a WebGL 2 expectation file is found properly."""
expected_filepath = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY,
'webgl2_conformance_expectations.txt')
actual_filepath = expectations.GetExpectationFileForSuite(
'webgl_conformance_integration_test', ['webgl-version-2'])
self.assertEqual(actual_filepath, expected_filepath)
@unittest.skipIf(sys.version_info[0] != 3, 'Python 3-only')
class FindBestInsertionLineForExpectationUnittest(
fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
self.expectation_file = os.path.join(
expectations.ABSOLUTE_EXPECTATION_FILE_DIRECTORY, 'expectation.txt')
uu.CreateFile(self, self.expectation_file)
expectation_file_contents = validate_tag_consistency.TAG_HEADER + """\
[ win ] some_test [ Failure ]
[ mac ] some_test [ Failure ]
[ win release ] bar_test [ Failure ]
[ win ] foo_test [ Failure ]
[ chromeos ] some_test [ Failure ]
"""
with open(self.expectation_file, 'w') as outfile:
outfile.write(expectation_file_contents)
def testNoMatchingTags(self):
"""Tests behavior when there are no expectations with matching tags."""
insertion_line, tags = expectations.FindBestInsertionLineForExpectation(
['android'], self.expectation_file)
self.assertEqual(insertion_line, -1)
self.assertEqual(tags, set())
def testMatchingTagsLastEntryChosen(self):
"""Tests that the last matching line is chosen."""
insertion_line, tags = expectations.FindBestInsertionLineForExpectation(
['win'], self.expectation_file)
# We expect "[ win ] foo_test [ Failure ]" to be chosen
expected_line = len(validate_tag_consistency.TAG_HEADER.splitlines()) + 6
self.assertEqual(insertion_line, expected_line)
self.assertEqual(tags, set(['win']))
def testMatchingTagsClosestMatchChosen(self):
"""Tests that the closest tag match is chosen."""
insertion_line, tags = expectations.FindBestInsertionLineForExpectation(
['win', 'release'], self.expectation_file)
# We expect "[ win release ] bar_test [ Failure ]" to be chosen
expected_line = len(validate_tag_consistency.TAG_HEADER.splitlines()) + 5
self.assertEqual(insertion_line, expected_line)
self.assertEqual(tags, set(['win', 'release']))
class GetExpectationFilesFromOriginUnittest(unittest.TestCase):
class FakeRequestResult(object):
def __init__(self):
self.text = ''
def read(self):
return self.text
def setUp(self):
self._get_patcher = mock.patch(
'flake_suppressor.expectations.urllib.request.urlopen')
self._get_mock = self._get_patcher.start()
self.addCleanup(self._get_patcher.stop)
def testBasic(self):
"""Tests basic functionality along the happy path."""
def SideEffect(url):
request_result = GetExpectationFilesFromOriginUnittest.FakeRequestResult()
text = ''
if url.endswith('test_expectations?format=TEXT'):
text = """\
mode type hash foo_tests.txt
mode type hash bar_tests.txt"""
elif url.endswith('foo_tests.txt?format=TEXT'):
text = 'foo_tests.txt content'
elif url.endswith('bar_tests.txt?format=TEXT'):
text = 'bar_tests.txt content'
else:
self.fail('Given unhandled URL %s' % url)
request_result.text = base64.b64encode(text.encode('utf-8'))
return request_result
self._get_mock.side_effect = SideEffect
| |
new_loc = DFLocation()
new_loc.set_to_other(self)
return new_loc
def _exec_arithmetic(
self, other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
], arithmetic: typing.Callable, *, op_name: str
):
"""Executes some arithmetic within the Location and returns a new one.
Parameters
----------
        other : Union[:class:`DFLocation`, Optional[Iterable[Union[:class:`int`, :class:`float`]]], \
            Union[:class:`int`, :class:`float`]]
            The other :class:`DFLocation`, an iterable of the form (x, y, z, pitch, yaw) (use None on any spot to keep
            it unchanged), or a single :class:`int`/:class:`float` that is distributed over x, y, z.
        arithmetic : Callable
            The function that executes the operation.
        op_name : :class:`str`
            The operation name, to be used in the error message.
Returns
-------
:class:`DFLocation`
New :class:`DFLocation`.
Raises
------
:exc:`TypeError`
Invalid type provided for arithmetic.
"""
new_loc = self.copy()
if type(other) == DFLocation:
x, y, z, pitch, yaw = map(float, (other.x, other.y, other.z, other.pitch, other.yaw))
elif isinstance(other, collections.Iterable):
            attr_list = [None] * 5  # placeholder for (x, y, z, pitch, yaw)
for i, value in enumerate(other):
if i > len(attr_list) - 1:
break # we don't need more values.
attr_list[i] = value
x, y, z, pitch, yaw = attr_list
elif type(other) in (int, float):
num = float(other)
x, y, z = [num] * 3
pitch, yaw = [None] * 2
else:
return NotImplemented
for attr, val in zip(("x", "y", "z"), (x, y, z)):
if val is None:
continue # keep current value
old_val = getattr(new_loc, attr)
if op_name == "division" and old_val == val == 0:
continue # nope; division by zero
setattr(new_loc, attr, float(arithmetic(float(old_val), float(val))))
for mod_attr, val, max_deg in zip(
("pitch", "yaw"), (pitch, yaw), (MAX_PITCH_DEGREES, MAX_YAW_DEGREES)
): # gotta do this mod 360, they're rotation values.
if val is None:
continue # keep current value
old_val = getattr(new_loc, mod_attr)
if op_name == "division" and old_val == val == 0:
continue # nope; division by zero
result_val = float(
arithmetic(float(old_val), float(val))
)
setattr(
new_loc, mod_attr, math.copysign(abs(result_val) % max_deg, result_val)
) # mod 90/180 degrees, keeping the sign (- or +).
return new_loc
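# Editor's illustrative sketch (not part of the original class): what
# _exec_arithmetic produces for the accepted operand kinds.  The constructor
# call assumes the usual DFLocation(x, y, z, pitch=..., yaw=...) order, and the
# exact wrapping depends on MAX_PITCH_DEGREES / MAX_YAW_DEGREES.
#
#   loc = DFLocation(1, 2, 3, pitch=10.0, yaw=20.0)
#   loc + 2                       # a plain number spreads over x, y, z only
#   loc + (1, None, -1, 5, None)  # iterable maps to (x, y, z, pitch, yaw);
#                                 # None keeps the current value, and rotation
#                                 # results are wrapped mod their max degrees
#                                 # with the sign preserved.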
# def to_item(self) -> "Item":
# pass # TODO: paper thing
def is_near(
self, center_val: "Locatable", valid_range: "Numeric"
) -> "IfVariable":
"""Checks if this DFLocation is within a certain distance of another location var.
Note that this method is also implemented within :class:`~.VarOperable` (i.e., :class:`~.DFGameValue`
and :class:`~.DFVariable`) and :class:`DFNumber`.
Parameters
----------
center_val : :attr:`~.Locatable`
The location to be compared with `self`.
valid_range : :attr:`~.Numeric`
The accepted distance between `self` and `center_val`.
Returns
-------
:class:`~.IfVariable`
The generated IfVariable codeblock for this condition.
Examples
--------
::
with DFLocation(1, 2, 3).is_near(var, 10):
# ... code to execute in DF if 2 is at most at a distance of 10 blocks from var ...
"""
from ..typings import p_check, Locatable, Numeric # lazy import to avoid cyclic imports
from ..codeblocks import IfVariable
from .collections import Arguments
args = Arguments(
[
self,
p_check(center_val, Locatable, "center_val"),
p_check(valid_range, Numeric, "valid_range")
]
)
return IfVariable(
action=IfVariableType.IS_NEAR,
args=args,
append_to_reader=False,
invert=False
)
def in_range(
self, min_loc: "Locatable", max_loc: "Locatable"
) -> "IfVariable":
"""Checks if this location is within the region formed by 2 other locations (the corners).
Parameters
----------
min_loc : :attr:`~.Locatable`
The first corner of the region to check.
max_loc : :attr:`~.Locatable`
The second corner, forming a region with min_loc; the code will execute if `self` (location) is within
that region.
Returns
-------
:class:`~.IfVariable`
The generated IfVariable codeblock for this condition.
See Also
--------
:meth:`~.VarOperable.in_range`
Examples
--------
::
with DFLocation(1, 2, 3).in_range(var_a, var_b):
# ... code that is only executed in DF if the location of x,y,z = 1,2,3 is within var_a and var_b ...
"""
from ..typings import p_check, Locatable # lazy import to avoid cyclic imports
from ..codeblocks import IfVariable
from .collections import Arguments
args = Arguments(
[
self,
p_check(min_loc, Locatable, "min_val"),
p_check(max_loc, Locatable, "max_val")
]
)
return IfVariable(
action=IfVariableType.IN_RANGE,
args=args,
append_to_reader=False,
invert=False
)
def __eq__(self, other: "DFLocation") -> bool:
attrs_to_check = set(self.__class__.__slots__) # - {"world_least", "world_most"}
return type(self) == type(other) and all(getattr(self, attr) == getattr(other, attr) for attr in attrs_to_check)
def __ne__(self, other: "DFLocation"):
return not self.__eq__(other)
def __gt__(self, other: "DFLocation") -> bool:
if not isinstance(other, type(self)):
return NotImplemented
positional_attrs = ("x", "y", "z")
return any(getattr(self, attr) > getattr(other, attr) for attr in positional_attrs)
def __ge__(self, other: "DFLocation") -> bool:
if not isinstance(other, type(self)):
return NotImplemented
positional_attrs = ("x", "y", "z")
return all(getattr(self, attr) >= getattr(other, attr) for attr in positional_attrs)
def __lt__(self, other: "DFLocation") -> bool:
if not isinstance(other, type(self)):
return NotImplemented
positional_attrs = ("x", "y", "z")
return any(getattr(self, attr) < getattr(other, attr) for attr in positional_attrs)
def __le__(self, other: "DFLocation") -> bool:
if not isinstance(other, type(self)):
return NotImplemented
positional_attrs = ("x", "y", "z")
return all(getattr(self, attr) <= getattr(other, attr) for attr in positional_attrs)
def __repr__(self):
return f"<{self.__class__.__name__} x={self.x} y={self.y} z={self.z} pitch={self.pitch} yaw={self.yaw}>"
def __str__(self):
return str((self.x, self.y, self.z, self.pitch, self.yaw))
def __getitem__(self, item: typing.Union[int, str, slice]):
if item in self.__class__.__slots__: # ["x"]
return getattr(self, item) # give them self.x
positional_attrs = (self.x, self.y, self.z)
return positional_attrs[item] # [0] = x ; [1] = y ; [2] = z
def __setitem__(self, key: typing.Union[int, str, slice], value: typing.Union[int, float]):
fl_val = float(value)
if key in self.__class__.__slots__:
return setattr(self, key, fl_val)
pos_attrs = ("x", "y", "z")
attr_s_to_set = pos_attrs[key]
if type(attr_s_to_set) == str:
setattr(self, attr_s_to_set, fl_val)
else:
for attr_name in attr_s_to_set:
setattr(self, attr_name, fl_val)
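# Editor's note (illustrative only): __getitem__/__setitem__ above accept
# either an attribute name from __slots__ or a positional index/slice over
# (x, y, z):
#
#   loc["x"]        # same as loc.x
#   loc[1]          # loc.y
#   loc[0:2] = 7    # sets both x and y to 7.0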
def __iter__(self):
for coord in (self.x, self.y, self.z):
yield coord
def __add__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, operator.add, op_name="addition")
def __radd__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self.__add__(other)
def __sub__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, operator.sub, op_name="subtraction")
def __rsub__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return DFLocation.__add__(-self, other)
def __mul__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, operator.mul, op_name="multiplication")
def __rmul__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self.__mul__(other)
def __truediv__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, operator.truediv, op_name="division")
def __floordiv__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, operator.floordiv, op_name="division")
def __pow__(
self,
other: typing.Union[
"DFLocation",
typing.Optional[typing.Iterable[AnyNumber]],
AnyNumber
]
) -> "DFLocation":
return self._exec_arithmetic(other, pow, op_name="power")
def __neg__(self):
new_loc = self.copy()
for attr in ("x", "y", "z", "pitch", "yaw"):
setattr(new_loc, attr, -1 * (getattr(self, attr)))
return new_loc
def __pos__(self):
return self
def __abs__(self):
new_loc = self.copy()
for attr in ("x", "y", "z", "pitch", "yaw"):
setattr(new_loc, attr, abs(getattr(self, attr)))
return new_loc
def __ceil__(self):
new_loc = self.copy()
for attr in ("x", "y", "z", "pitch", "yaw"):
setattr(new_loc, attr, float(math.ceil(getattr(self, attr))))
return new_loc
def __floor__(self):
new_loc = self.copy()
for attr in ("x", "y", "z", "pitch", "yaw"):
setattr(new_loc, attr, float(math.floor(getattr(self, attr))))
return new_loc
def __hash__(self):
return hash(tuple(getattr(self, attr) for attr in ("x", "y", "z", "pitch", "yaw")))
class DFSound(DFType):
"""Used for DF Sounds (Blaze Death, XP Level up etc.)
    Parameters
----------
sound_type : :class:`SoundType`
The enum instance that specifies which sound is this.
pitch : :class:`float`
The pitch of this sound (between ``0.0`` and ``2.0``, inclusive). Defaults to 1.0
volume : :class:`float`
The volume of this sound. Defaults to 2.0
Raises
    ------
:exc:`ValueError`
        Raised if the given pitch is outside the range ``0.0 <= x <= 2.0``.
    .. container:: comparisons
.. describe:: a == b, a != b
Compares every attribute of `a` and `b`.
    Attributes
    ----------
sound_type : :class:`SoundType`
The enum instance that specifies which sound is this.
pitch : :class:`float`
The pitch of this sound. Defaults to 1.0
volume : :class:`float`
The volume of this sound. Defaults to 2.0
"""
__slots__ = ("sound_type", "_pitch", "volume")
sound_type: SoundType
_pitch: float
volume: float
def __init__(
self, sound_type: SoundType, *, volume: float = DEFAULT_SOUND_VOL,
pitch: AnyNumber = DEFAULT_SOUND_PITCH
):
"""
Parameters
----------
sound_type : :class:`SoundType`
The enum instance that specifies which sound is this.
pitch : :class:`float`
The pitch of this sound (between ``0.0`` and ``2.0``, inclusive). Defaults to 1.0
volume : :class:`float`
The volume of this sound. Defaults | |
(self.samples_per_cluster[idx, i] == 0):
# raise ValueError("\n\nCluster %d of file %s has no samples!\n\n" % (i, fpath))
if (np.sum(self.samples_per_cluster[idx, :]) != self.num_samples):
raise ValueError("\n\nSamplers per cluster sum: %d, not equal to total num samples: %d\n\n" % (np.sum(self.samples_per_cluster[idx,:]), self.num_samples))
# print("\n\nDone\n\n")
# exit(0);
# for i in range(self.num_clusters):
# self.cluster_idxs.append(self.clustered_inputs[])
self.file_idxs = np.random.permutation(np.arange(self.num_files))
self.current_file = 0
self.current_sample = 0
self.barrier = mp.Barrier(self.args.num_data_workers)
# Set the starting dataset
self.input_dataset = None
self.output_dataset = None
self.cluster_idxs = [None] * self.num_clusters
self.sampling_idxs = [None] * self.num_clusters
self.current_sampling_idx = [None] * self.num_clusters
self.reset_dataset()
def set_scalers(self, input_scaler, output_scaler):
self.input_scaler = input_scaler
self.output_scaler = output_scaler
if (not self.is_input_scaled):
self.input_dataset = self.input_scaler.do_scaling_sample(self.input_dataset)
else:
raise ValueError("\n\nBig Clustered Dataset INPUT already scaled. Exiting.\n\n")
if (not self.is_output_scaled):
self.output_dataset = self.output_scaler.do_scaling_sample(self.output_dataset)
else:
raise ValueError("\n\nBig Clustered Dataset OUTPUT already scaled. Exiting.\n\n")
# pick a sample within some cluster
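    # Sampling strategy, as implemented below: the dataset index is mapped onto a
    # cluster round-robin (idx % num_clusters); empty clusters are skipped by retrying
    # random clusters (the loop gives up with an error after ~100 bad draws); within the
    # chosen cluster, a pre-shuffled permutation is walked via current_sampling_idx so
    # samples are drawn in random order without immediate repeats.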
def get_clustered_idx(self, idx):
cluster = int(idx % self.num_clusters)
file_idx = self.file_idxs[self.current_file]
num_cluster_samples = self.samples_per_cluster[file_idx, cluster]
# Rejection sampling
if (num_cluster_samples == 0):
bad_iters = 0
while (num_cluster_samples == 0):
cluster = np.random.randint(self.num_clusters)
num_cluster_samples = self.samples_per_cluster[file_idx, cluster]
if (bad_iters > 100):
raise ValueError("\n\n100 successive bad clusters. Exiting.\n\n")
bad_iters += 1
# Must randomly select because subsampling and do not want to bias data choice
# idx_within_cluster = np.random.randint(num_cluster_samples)
front = self.sampling_idxs[cluster]
# print("Front: ", type(front))
back = self.current_sampling_idx[cluster] % num_cluster_samples
# print("Back: ", type(back), back)
back = int(back)
idx_within_cluster = front[back]
# remove chance of revisiting same sample within a sample
# idx_within_cluster = self.sampling_idxs[cluster][self.current_sampling_idx[cluster] % num_cluster_samples]
self.current_sampling_idx[cluster] += 1
# if (self.cluster_idxs[cluster].shape[0] != num_cluster_samples):
# raise ValueError("\n\nRank: %d, Get IDX. New_File_Idx: %d, current_file: %d, Cluster id: %d, CIDS: %d, SPC: %d, SAMPLE: %d\n\n" % (hvd.rank(), file_idx, self.current_file, cluster, self.cluster_idxs[cluster].shape[0], num_cluster_samples, idx_within_cluster))
# return the original fp sample idx given a cluster and idx_within_cluster
# return np.arange(self.num_samples)[self.clustered_inputs[file_idx, :] == cluster][idx_within_cluster]
return self.cluster_idxs[cluster][idx_within_cluster]
def reset_dataset(self):
# Create a new permutation
if (self.current_file == self.num_files):
self.file_idxs = np.random.permutation(np.arange(self.num_files))
self.current_file = 0
del self.input_dataset
del self.output_dataset
new_file_idx = self.file_idxs[self.current_file]
# Load file into memory
self.input_dataset = np.load(self.input_fpaths[new_file_idx])
self.output_dataset = np.load(self.output_fpaths[new_file_idx])
# Reshape data
self.input_dataset = np.reshape(self.input_dataset, \
self.input_shape)
self.output_dataset = np.reshape(self.output_dataset, \
self.output_shape)
# Subset data
self.input_dataset = self.input_dataset[:, self.input_mask]
self.output_dataset = self.output_dataset[:, self.output_mask]
# Scale data
self.input_dataset = self.input_scaler.do_scaling_sample(self.input_dataset)
self.output_dataset = self.output_scaler.do_scaling_sample(self.output_dataset)
# cidxs = mp.Manager().list(range(self.num_clusters))
# sidxs = mp.Manager().list(range(self.num_clusters))
# csidx = mp.Manager().list()
# def set_idx(i):
# cidxs[i] = np.arange(self.num_samples)[self.clustered_inputs[new_file_idx, :] == i]
# sidxs[i] = np.random.permutation(np.arange(self.samples_per_cluster[new_file_idx, i], dtype=np.int64))
# self.current_sampling_idx[i] = 0
# pool = mp.Pool()
# for i in range(self.num_clusters):
# pool.apply_async(set_idx, (i,))
# pool.close()
# for i in range(self.num_clusters):
# self.cluster_idxs[i] = cidxs[i]
# self.sampling_idxs[i] = sidxs[i]
# self.current_sampling_idx[i] = 0
# Reset cluster idxs for the new snapshot
for i in range(self.num_clusters):
self.cluster_idxs[i] = np.arange(self.num_samples)[self.clustered_inputs[new_file_idx, :] == i]
self.sampling_idxs[i] = np.random.permutation(np.arange(self.samples_per_cluster[new_file_idx, i], dtype=np.int64))
self.current_sampling_idx[i] = 0
# if (self.cluster_idxs[i].shape[0] != self.samples_per_cluster[new_file_idx, i]):
# raise ValueError("\n\nRank: %d, Reset dataset. New_File_Idx: %d, current_file: %d, Cluster id: %d, CIDS: %d, SPC: %d\n\n" % (hvd.rank(), new_file_idx, self.current_file, i, self.cluster_idxs[i].shape[0], self.samplers_per_cluster[new_file_idx, i]))
# Fetch a sample
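    # Multi-worker coordination, as implemented below: once this worker has consumed its
    # share of the current file, all args.num_data_workers loader processes meet at the
    # barrier; the worker handed index 0 by the barrier reloads the next file via
    # reset_dataset(), every worker bumps current_file and resets current_sample, and a
    # second barrier releases them all.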
def __getitem__(self, idx):
if (self.reset and self.current_sample >= (self.num_samples * self.cluster_sample_ratio / hvd.size())):
pid = self.barrier.wait()
print("Rank %d, Reset PID: %d" % (hvd.rank(), pid))
self.current_file += 1
if (pid == 0):
print("Rank: %d, Entering reset datset on PID %d" % (hvd.rank(), pid))
self.reset_dataset()
# self.current_file += 1
self.current_sample = 0
print("Rank: %d, PID %d waiting or Done" % (hvd.rank(), pid))
self.barrier.wait()
self.current_sample += self.args.num_data_workers
# sample_idx = idx % self.num_samples
sample_idx = self.get_clustered_idx(idx)
input_tensor = torch.tensor(self.input_dataset[sample_idx, :], dtype=torch.float32)
output_tensor = torch.tensor(self.output_dataset[sample_idx, :], dtype=torch.float32)
return input_tensor, output_tensor
# Number of samples in dataset
def __len__(self):
return int(self.num_files * self.num_samples * self.cluster_sample_ratio)
###-----------------------------------------------------------------------###
# Compressed Dataset
#class Big_Compressed_Dataset(torch.utils.data.Dataset):
#
# def __init__(self, args, data_name, fp_data, ldos_data):
#
# if (hvd.rank() == 0):
# print("Creating Big Compressed Dataset:")
#
# self.args = args
# self.sample = 0
#
# if (args.load_encoder):
# self.encoder = 0
# else:
#
# args.fp_length = fp_data.shape[1]
#
# self.num_subdim = 2
# self.ks = 256
#
# if (args.fp_length % self.num_subdim != 0):
# print("\n\nPQKMeans division error. %d not a factor of %d. Exiting!\n" % (self.num_subdim, args.fp_length))
# exit(0)
#
# self.pqkmeans.encoder.PQEncoder(num_subdim=self.num_subdim, Ks=self.ks
#
# sample_pts = fp_data.shape[0] * args.compress_fit_ratio
#
# if (hvd.rank() == 0):
# print("Begin fitting encoder to subset of data")
#
# tic = timeit.default_timer()
# self.encoder.fit(fp_data[:sample_pts])
# toc = timeit.default_timer()
#
# if (hvd.rank() == 0):
# print("Fit %d samples to %s dataset encoder: %4.4fs" % (sample_pts, data_name, toc - tic))
#
# tic
#
# fp_encode = encoder.transform(fp_data)
#
#
# self
#
#
#
#
# self.cluster_ids = []
#
# for i in range(args.clusters):
# self.cluster_ids.append()
#
#
#
# def __getitem__(self, idx):
#
#
#
# return 0;
#
# def __len__(self):
# return 1;
###-----------------------------------------------------------------------###
###-----------------------------------------------------------------------###
class Big_Data_Scaler:
def __init__(self,
file_paths,
num_samples,
data_shape,
data_subset=None,
element_scaling=False,
standardize=False,
normalize=False,
max_only=False,
apply_log=False):
self.file_paths = file_paths
self.num_samples = num_samples
self.data_shape = data_shape
self.data_subset = data_subset
self.element_scaling = element_scaling
self.standardize = standardize
self.normalize = normalize
self.max_only = max_only
self.apply_log = apply_log
self.no_scaling = not standardize and not normalize
print("Calculating scaling factors.")
self.setup_scaling()
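    # Hypothetical construction (argument values are illustrative only):
    #   scaler = Big_Data_Scaler(fpaths, num_samples=200000, data_shape=[91],
    #                            element_scaling=True, normalize=True)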
def print_factors(self):
if (self.no_scaling):
print("No Scaling")
if (self.element_scaling):
if (self.standardize):
print("Scaling Element Factors (Mean/Std)")
elif (self.normalize):
print("Scaling Element Factors (Min/Max)")
else:
if (self.standardize):
print("Scaling Total Factors (Mean/Std)")
elif (self.normalize):
print("Scaling Total Factors (Min/Max)")
for i in range(self.factors.shape[1]):
print("%d: %4.4f, %4.4f" % (i, self.factors[0, i], self.factors[1, i]))
# Scale one sample
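    # In factor terms: normalize maps x -> (x - min) / (max - min), and
    # standardize maps x -> (x - mean) / std, with factors[0]/factors[1] holding
    # min/max or mean/std respectively.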
def do_scaling_sample(self, x):
if (self.no_scaling):
return x
if (not self.element_scaling):
if (self.normalize):
return (x - self.factors[0, 0]) / (self.factors[1, 0] - self.factors[0, 0])
elif(self.standardize):
return (x - self.factors[0, 0]) / self.factors[1, 0]
else:
raise ValueError("\n\nBad scaling choices.\n\n")
else:
if (self.normalize):
return (x - self.factors[0, :]) / (self.factors[1, :] - self.factors[0, :])
elif (self.standardize):
return (x - self.factors[0, :]) / self.factors[1, :]
else:
raise ValueError("\n\nBad scaling choices.\n\n")
# Undo scaling of one sample
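    # Inverse of do_scaling_sample: normalize undoes via x * (max - min) + min,
    # standardize via x * std + mean.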
def undo_scaling_sample(self, x):
if (self.no_scaling):
return x
if (not self.element_scaling):
if (self.normalize):
return (x * (self.factors[1, 0] - self.factors[0, 0])) + self.factors[0, 0]
elif(self.standardize):
                return (x * self.factors[1, 0]) + self.factors[0, 0]
else:
raise ValueError("\n\nBad scaling choices.\n\n")
else:
if (self.normalize):
return (x * (self.factors[1, :] - self.factors[0, :])) + self.factors[0, :]
elif (self.standardize):
return (x * self.factors[1, :]) + self.factors[0, :]
else:
raise ValueError("\n\nBad scaling choices.\n\n")
# Scale batch (or full) data
# def do_scaling_batch(self, x):
#
# if (self.no_scaling):
# return x
#
# if (not self.element_scaling):
# if (self.normalize):
# return (x - self.factors[0, 0]) / (self.factors[1, 0] - self.factors[0, 0])
# elif(self.standardize):
# return (x - self.factors[0, 0]) / self.factors[1, 0]
# else:
# raise ValueError("\n\nBad scaling choices.\n\n")
#
# else:
# if (self.normalize):
# return (x - self.factors[0, :, None]) / (self.factors[1, :, None] - self.factors[0, :, None])
# elif (self.standardize):
# return (x - self.factors[0, :, None]) / self.factors[1, :, None]
#
# else:
# raise ValueError("\n\nBad scaling choices.\n\n")
#
#
# # Undo scaling of batch (or full) data
# def undo_scaling_batch(self, x):
#
# if (self.no_scaling):
# return x
#
# if (not self.element_scaling):
# if (self.normalize):
# return (x * (self.factors[1, 0] - self.factors[0, 0])) + self.factors[0, 0]
# elif(self.standardize):
# return (x * self.factors[1, 0]) + self.factors[1, 0]
# else:
# raise ValueError("\n\nBad scaling choices.\n\n")
#
# else:
# if (self.normalize):
# return (x * (self.factors[1, :, None] - self.factors[0, :, None])) + self.factors[0, :, None]
# elif (self.standardize):
# return (x * self.factors[1, :, None]) + self.factors[0, :, None]
#
# else:
# raise ValueError("\n\nBad scaling choices.\n\n")
#
# Calculate and store scaling factors
def setup_scaling(self):
# Factors
# factors[0,:], Min (normalize) or Mean (standardize)
# factors[1,:], Max (normalize) or Std (standardize)
if (not self.element_scaling):
self.factors = np.zeros([2, 1])
else:
self.factors = np.zeros([2, self.data_subset.size])
if (self.no_scaling):
print("No scaling. Neither standardize nor normalize scaling choosen. ")
return;
sample_count = 0
count_elems = 0
# print("Setup")
for idx, fpath in enumerate(self.file_paths):
file_data = np.load(fpath)
# Shape Data
file_data = np.reshape(file_data, \
np.insert(self.data_shape, \
0, self.num_samples))
# Subset Data
if (self.data_subset is not None):
file_data = file_data[:, self.data_subset]
# Final data shape
self.new_shape = np.array(file_data.shape[1:])
# Total Scaling
if (not self.element_scaling):
if (self.normalize):
self.calc_normalize(file_data, 0)
elif (self.standardize):
count_elems = file_data.size
else:
raise ValueError("\n\nBad scaling choices.\n\n")
# Element Scaling
else:
for elem in range(np.prod(self.new_shape)):
# print("Elem %d" % elem)
# elem_idx = np.zeros(self.new_shape, dtype=bool)
# elem_slice = np.array([])
if (file_data.ndim != 2):
raise ValueError("\nScaler only supports [samples x vector] data.\n")
if (self.normalize):
self.calc_normalize(file_data[:, elem], elem)
elif (self.standardize):
self.calc_standardize(file_data[:, elem], elem, sample_count)
else:
raise ValueError("\n\nBad scaling choices.\n\n")
sample_count += self.num_samples
# if (self.standardize):
# self.factors[1, :] = np.sqrt(self.factors[1, :] / standardize_count)
# Calculate min/max normalization | |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Dict, Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
*,
enabled: Optional[bool] = None,
not_before: Optional[datetime.datetime] = None,
expires: Optional[datetime.datetime] = None,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = enabled
self.not_before = not_before
self.expires = expires
self.created = None
self.updated = None
class BackupSecretResult(msrest.serialization.Model):
"""The backup secret result, containing the backup blob.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The backup blob containing the backed up secret.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(BackupSecretResult, self).__init__(**kwargs)
self.value = None
class SecretBundle(msrest.serialization.Model):
"""A secret consisting of a value, id and its attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(SecretBundle, self).__init__(**kwargs)
self.value = value
self.id = id
self.content_type = content_type
self.attributes = attributes
self.tags = tags
self.kid = None
self.managed = None
class DeletedSecretBundle(SecretBundle):
"""A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
value: Optional[str] = None,
id: Optional[str] = None,
content_type: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretBundle, self).__init__(value=value, id=id, content_type=content_type, attributes=attributes, tags=tags, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class SecretItem(msrest.serialization.Model):
"""The secret item containing secret metadata.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'managed': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
**kwargs
):
super(SecretItem, self).__init__(**kwargs)
self.id = id
self.attributes = attributes
self.tags = tags
self.content_type = content_type
self.managed = None
class DeletedSecretItem(SecretItem):
"""The deleted secret item containing metadata about the deleted secret.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_3_preview.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
*,
id: Optional[str] = None,
attributes: Optional["SecretAttributes"] = None,
tags: Optional[Dict[str, str]] = None,
content_type: Optional[str] = None,
recovery_id: Optional[str] = None,
**kwargs
):
super(DeletedSecretItem, self).__init__(id=id, attributes=attributes, tags=tags, content_type=content_type, **kwargs)
self.recovery_id = recovery_id
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedSecretListResult(msrest.serialization.Model):
"""The deleted secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of the deleted secrets in the | |
from djitellopy.tello import Tello
import cv2
import numpy as np
import time
import datetime
import os
import argparse
# standard argparse stuff
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
help='** = required')
parser.add_argument('-d', '--distance', type=int, default=3,
help='use -d to change the distance of the drone. Range 0-6')
parser.add_argument('-sx', '--saftey_x', type=int, default=100,
                    help='use -sx to change the safety bound on the x axis. Range 0-480')
parser.add_argument('-sy', '--saftey_y', type=int, default=55,
                    help='use -sy to change the safety bound on the y axis. Range 0-360')
parser.add_argument('-os', '--override_speed', type=int, default=1,
help='use -os to change override speed. Range 0-3')
parser.add_argument('-ss', "--save_session", action='store_true',
help='add the -ss flag to save your session as an image sequence in the Sessions folder')
parser.add_argument('-D', "--debug", action='store_true',
help='add the -D flag to enable debug mode. Everything works the same, but no commands will be sent to the drone')
args = parser.parse_args()
# Speed of the drone
S = 20
S2 = 5
UDOffset = 150
# bounding-box sizes (in pixels) that OpenCV reports for a face at each distance setting (0-6)
faceSizes = [1026, 684, 456, 304, 202, 136, 90]
# These are the thresholds at which speed-up mode kicks in; they have not been finalized or fine-tuned yet, so be careful
# Tested values: 3, 4, 5
acc = [500, 250, 250, 150, 110, 70, 50]
# Frames per second of the pygame window display
FPS = 25
dimensions = (960, 720)
#
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
# If we are to save our sessions, we need to make sure the proper directories exist
if args.save_session:
ddir = "Sessions"
if not os.path.isdir(ddir):
os.mkdir(ddir)
ddir = "Sessions/Session {}".format(str(datetime.datetime.now()).replace(':', '-').replace('.', '_'))
os.mkdir(ddir)
class FrontEnd(object):
def __init__(self):
# Init Tello object that interacts with the Tello drone
self.tello = Tello()
# Drone velocities between -100~100
self.for_back_velocity = 0
self.left_right_velocity = 0
self.up_down_velocity = 0
self.yaw_velocity = 0
self.speed = 10
self.send_rc_control = False
def run(self):
if not self.tello.connect():
print("Tello not connected")
return
if not self.tello.set_speed(self.speed):
print("Not set speed to lowest possible")
return
# In case streaming is on. This happens when we quit this program without the escape key.
if not self.tello.streamoff():
print("Could not stop video stream")
return
if not self.tello.streamon():
print("Could not start video stream")
return
frame_read = self.tello.get_frame_read()
should_stop = False
imgCount = 0
OVERRIDE = False
oSpeed = args.override_speed
tDistance = args.distance
self.tello.get_battery()
# Safety Zone X
szX = args.saftey_x
# Safety Zone Y
szY = args.saftey_y
if args.debug:
print("DEBUG MODE ENABLED!")
while not should_stop:
self.update()
if frame_read.stopped:
frame_read.stop()
break
theTime = str(datetime.datetime.now()).replace(':', '-').replace('.', '_')
frame = cv2.cvtColor(frame_read.frame, cv2.COLOR_BGR2RGB)
frameRet = frame_read.frame
vid = self.tello.get_video_capture()
if args.save_session:
cv2.imwrite("{}/tellocap{}.jpg".format(ddir, imgCount), frameRet)
frame = np.rot90(frame)
imgCount += 1
time.sleep(1 / FPS)
# Listen for key presses
k = cv2.waitKey(20)
# Press 0 to set distance to 0
if k == ord('0'):
if not OVERRIDE:
print("Distance = 0")
tDistance = 0
# Press 1 to set distance to 1
if k == ord('1'):
if OVERRIDE:
oSpeed = 1
else:
print("Distance = 1")
tDistance = 1
# Press 2 to set distance to 2
if k == ord('2'):
if OVERRIDE:
oSpeed = 2
else:
print("Distance = 2")
tDistance = 2
# Press 3 to set distance to 3
if k == ord('3'):
if OVERRIDE:
oSpeed = 3
else:
print("Distance = 3")
tDistance = 3
# Press 4 to set distance to 4
if k == ord('4'):
if not OVERRIDE:
print("Distance = 4")
tDistance = 4
# Press 5 to set distance to 5
if k == ord('5'):
if not OVERRIDE:
print("Distance = 5")
tDistance = 5
# Press 6 to set distance to 6
if k == ord('6'):
if not OVERRIDE:
print("Distance = 6")
tDistance = 6
# Press T to take off
if k == ord('t'):
if not args.debug:
print("Taking Off")
self.tello.takeoff()
self.tello.get_battery()
self.send_rc_control = True
# Press L to land
if k == ord('l'):
if not args.debug:
print("Landing")
self.tello.land()
self.send_rc_control = False
# Press Backspace for controls override
if k == 8:
if not OVERRIDE:
OVERRIDE = True
print("OVERRIDE ENABLED")
else:
OVERRIDE = False
print("OVERRIDE DISABLED")
if OVERRIDE:
# S & W to fly forward & back
if k == ord('w'):
self.for_back_velocity = int(S * oSpeed)
elif k == ord('s'):
self.for_back_velocity = -int(S * oSpeed)
else:
self.for_back_velocity = 0
# a & d to pan left & right
if k == ord('d'):
self.yaw_velocity = int(S * oSpeed)
elif k == ord('a'):
self.yaw_velocity = -int(S * oSpeed)
else:
self.yaw_velocity = 0
# Q & E to fly up & down
if k == ord('e'):
self.up_down_velocity = int(S * oSpeed)
elif k == ord('q'):
self.up_down_velocity = -int(S * oSpeed)
else:
self.up_down_velocity = 0
# c & z to fly left & right
if k == ord('c'):
self.left_right_velocity = int(S * oSpeed)
elif k == ord('z'):
self.left_right_velocity = -int(S * oSpeed)
else:
self.left_right_velocity = 0
# Quit the software
if k == 27:
should_stop = True
break
gray = cv2.cvtColor(frameRet, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=2)
# Target size
tSize = faceSizes[tDistance]
# These are our center dimensions
cWidth = int(dimensions[0] / 2)
cHeight = int(dimensions[1] / 2)
noFaces = len(faces) == 0
            # if we've been given rc control and face coords were returned, track the face
if self.send_rc_control and not OVERRIDE:
for (x, y, w, h) in faces:
#
roi_gray = gray[y:y + h, x:x + w] # (ycord_start, ycord_end)
roi_color = frameRet[y:y + h, x:x + w]
# setting Face Box properties
fbCol = (255, 0, 0) # BGR 0-255
fbStroke = 2
# end coords are the end of the bounding box x & y
end_cord_x = x + w
end_cord_y = y + h
end_size = w * 2
# these are our target coordinates
targ_cord_x = int((end_cord_x + x) / 2)
targ_cord_y = int((end_cord_y + y) / 2) + UDOffset
# This calculates the vector from your face to the center of the screen
vTrue = np.array((cWidth, cHeight, tSize))
vTarget = np.array((targ_cord_x, targ_cord_y, end_size))
vDistance = vTrue - vTarget
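                    # Control mapping (see below): the x error beyond +/-szX drives yaw,
                    # the y error beyond +/-szY drives up/down, and the size error drives
                    # forward/back, with an extra boost F = S once |size error| > acc[tDistance].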
#
if not args.debug:
# for turning
if vDistance[0] < -szX:
self.yaw_velocity = S
# self.left_right_velocity = S2
elif vDistance[0] > szX:
self.yaw_velocity = -S
# self.left_right_velocity = -S2
else:
self.yaw_velocity = 0
# for up & down
if vDistance[1] > szY:
self.up_down_velocity = S
elif vDistance[1] < -szY:
self.up_down_velocity = -S
else:
self.up_down_velocity = 0
F = 0
if abs(vDistance[2]) > acc[tDistance]:
F = S
# for forward back
if vDistance[2] > 0:
self.for_back_velocity = S + F
elif vDistance[2] < 0:
self.for_back_velocity = -S - F
else:
self.for_back_velocity = 0
# Draw the face bounding box
cv2.rectangle(frameRet, (x, y), (end_cord_x, end_cord_y), fbCol, fbStroke)
# Draw the target as a circle
cv2.circle(frameRet, (targ_cord_x, targ_cord_y), 10, (0, 255, 0), 2)
# Draw the safety zone
cv2.rectangle(frameRet, (targ_cord_x - szX, targ_cord_y - szY),
(targ_cord_x + szX, targ_cord_y + szY), (0, 255, 0), fbStroke)
# Draw the estimated drone vector position in relation to face bounding box
cv2.putText(frameRet, str(vDistance), (0, 64), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # if no faces are detected, zero all velocities so the drone holds position
if noFaces:
self.yaw_velocity = 0
self.up_down_velocity = 0
self.for_back_velocity = 0
print("NO TARGET")
# Draw the center of screen circle, this is what the drone tries to match with the target coords
cv2.circle(frameRet, (cWidth, cHeight), 10, (0, 0, 255), 2)
            dCol = lerp(np.array((0, 0, 255)), np.array((255, 255, 255)), (tDistance + 1) / 7)
if OVERRIDE:
show = "OVERRIDE: {}".format(oSpeed)
dCol = (255, 255, 255)
else:
show = "AI: {}".format(str(tDistance))
            # Draw the distance chosen
cv2.putText(frameRet, show, (32, 664), cv2.FONT_HERSHEY_SIMPLEX, 1, dCol, 2)
# Display the resulting frame
            cv2.imshow('Tello Tracking...', frameRet)
# On exit, print the battery
self.tello.get_battery()
# When everything done, release the capture
cv2.destroyAllWindows()
        # Always call before finishing; it deallocates resources.
self.tello.end()
def battery(self):
return self.tello.get_battery()[:2]
def update(self):
""" Update routine. Send velocities to Tello."""
if self.send_rc_control:
self.tello.send_rc_control(self.left_right_velocity, self.for_back_velocity, self.up_down_velocity,
self.yaw_velocity)
def lerp(a, b, c):
return a + c * (b - a)
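# e.g. lerp(0, 10, 0.5) == 5.0; used above to fade the HUD color with the distance setting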
def main():
frontend = FrontEnd()
# run frontend
frontend.run()
if __name__ == '__main__':
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 18-8-8 8:06 PM
# @Author : Tom.Lee
# @File : mysqld_exporter.py
# @Product : PyCharm
# @Docs :
# @Source :
from .exporter import Exporter
from ..prometheus import prom
class MysqlExporter(Exporter):
"""
mysql status info
"""
@prom
def mysql_exporter_collector_duration_seconds(self, **kwargs):
pass
@prom
def mysql_exporter_last_scrape_error(self, **kwargs):
pass
@prom
def mysql_exporter_scrapes_total(self, **kwargs):
pass
@prom
def mysql_global_status_aborted_clients(self, **kwargs):
pass
@prom
def mysql_global_status_aborted_connects(self, **kwargs):
pass
@prom
def mysql_global_status_binlog_cache_disk_use(self, **kwargs):
pass
@prom
def mysql_global_status_binlog_cache_use(self, **kwargs):
pass
@prom
def mysql_global_status_binlog_stmt_cache_disk_use(self, **kwargs):
pass
@prom
def mysql_global_status_binlog_stmt_cache_use(self, **kwargs):
pass
@prom
def mysql_global_status_buffer_pool_page_changes_total(self, **kwargs):
pass
@prom
def mysql_global_status_buffer_pool_pages(self, **kwargs):
pass
@prom
def mysql_global_status_bytes_received(self, **kwargs):
pass
@prom
def mysql_global_status_bytes_sent(self, **kwargs):
pass
@prom
def mysql_global_status_commands_total(self, **kwargs):
pass
@prom
def mysql_global_status_connection_errors_total(self, **kwargs):
pass
@prom
def mysql_global_status_connections(self, **kwargs):
pass
@prom
def mysql_global_status_created_tmp_disk_tables(self, **kwargs):
pass
@prom
def mysql_global_status_created_tmp_files(self, **kwargs):
pass
@prom
def mysql_global_status_created_tmp_tables(self, **kwargs):
pass
@prom
def mysql_global_status_delayed_errors(self, **kwargs):
pass
@prom
def mysql_global_status_delayed_insert_threads(self, **kwargs):
pass
@prom
def mysql_global_status_delayed_writes(self, **kwargs):
pass
@prom
def mysql_global_status_flush_commands(self, **kwargs):
pass
@prom
def mysql_global_status_handlers_total(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_available_undo_logs(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_bytes_data(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_bytes_dirty(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_read_ahead(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_read_ahead_evicted(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_read_ahead_rnd(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_read_requests(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_reads(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_wait_free(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_buffer_pool_write_requests(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_fsyncs(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_pending_fsyncs(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_pending_reads(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_pending_writes(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_read(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_reads(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_writes(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_data_written(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_dblwr_pages_written(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_dblwr_writes(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_log_waits(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_log_write_requests(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_log_writes(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_num_open_files(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_os_log_fsyncs(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_os_log_pending_fsyncs(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_os_log_pending_writes(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_os_log_written(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_page_size(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_pages_created(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_pages_read(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_pages_written(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_lock_current_waits(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_lock_time(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_lock_time_avg(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_lock_time_max(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_lock_waits(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_row_ops_total(self, **kwargs):
pass
@prom
def mysql_global_status_innodb_truncated_status_writes(self, **kwargs):
pass
@prom
def mysql_global_status_key_blocks_not_flushed(self, **kwargs):
pass
@prom
def mysql_global_status_key_blocks_unused(self, **kwargs):
pass
@prom
def mysql_global_status_key_blocks_used(self, **kwargs):
pass
@prom
def mysql_global_status_key_read_requests(self, **kwargs):
pass
@prom
def mysql_global_status_key_reads(self, **kwargs):
pass
@prom
def mysql_global_status_key_write_requests(self, **kwargs):
pass
@prom
def mysql_global_status_key_writes(self, **kwargs):
pass
@prom
def mysql_global_status_locked_connects(self, **kwargs):
pass
@prom
def mysql_global_status_max_execution_time_exceeded(self, **kwargs):
pass
@prom
def mysql_global_status_max_execution_time_set(self, **kwargs):
pass
@prom
def mysql_global_status_max_execution_time_set_failed(self, **kwargs):
pass
@prom
def mysql_global_status_max_used_connections(self, **kwargs):
pass
@prom
def mysql_global_status_not_flushed_delayed_rows(self, **kwargs):
pass
@prom
def mysql_global_status_ongoing_anonymous_transaction_count(self, **kwargs):
pass
@prom
def mysql_global_status_open_files(self, **kwargs):
pass
@prom
def mysql_global_status_open_streams(self, **kwargs):
pass
@prom
def mysql_global_status_open_table_definitions(self, **kwargs):
pass
@prom
def mysql_global_status_open_tables(self, **kwargs):
pass
@prom
def mysql_global_status_opened_files(self, **kwargs):
pass
@prom
def mysql_global_status_opened_table_definitions(self, **kwargs):
pass
@prom
def mysql_global_status_opened_tables(self, **kwargs):
pass
@prom
def mysql_global_status_performance_schema_lost_total(self, **kwargs):
pass
@prom
def mysql_global_status_prepared_stmt_count(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_free_blocks(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_free_memory(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_hits(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_inserts(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_lowmem_prunes(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_not_cached(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_queries_in_cache(self, **kwargs):
pass
@prom
def mysql_global_status_qcache_total_blocks(self, **kwargs):
pass
@prom
def mysql_global_status_queries(self, **kwargs):
pass
@prom
def mysql_global_status_questions(self, **kwargs):
pass
@prom
def mysql_global_status_select_full_join(self, **kwargs):
pass
@prom
def mysql_global_status_select_full_range_join(self, **kwargs):
pass
@prom
def mysql_global_status_select_range(self, **kwargs):
pass
@prom
def mysql_global_status_select_range_check(self, **kwargs):
pass
@prom
def mysql_global_status_select_scan(self, **kwargs):
pass
@prom
def mysql_global_status_slave_open_temp_tables(self, **kwargs):
pass
@prom
def mysql_global_status_slow_launch_threads(self, **kwargs):
pass
@prom
def mysql_global_status_slow_queries(self, **kwargs):
pass
@prom
def mysql_global_status_sort_merge_passes(self, **kwargs):
pass
@prom
def mysql_global_status_sort_range(self, **kwargs):
pass
@prom
def mysql_global_status_sort_rows(self, **kwargs):
pass
@prom
def mysql_global_status_sort_scan(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_accept_renegotiates(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_accepts(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_callback_cache_hits(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_client_connects(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_connect_renegotiates(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_ctx_verify_depth(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_ctx_verify_mode(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_default_timeout(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_finished_accepts(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_finished_connects(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_session_cache_hits(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_session_cache_misses(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_session_cache_overflows(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_session_cache_size(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_session_cache_timeouts(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_sessions_reused(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_used_session_cache_entries(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_verify_depth(self, **kwargs):
pass
@prom
def mysql_global_status_ssl_verify_mode(self, **kwargs):
pass
@prom
def mysql_global_status_table_locks_immediate(self, **kwargs):
pass
@prom
def mysql_global_status_table_locks_waited(self, **kwargs):
pass
@prom
def mysql_global_status_table_open_cache_hits(self, **kwargs):
pass
@prom
def mysql_global_status_table_open_cache_misses(self, **kwargs):
pass
@prom
def mysql_global_status_table_open_cache_overflows(self, **kwargs):
pass
@prom
def mysql_global_status_tc_log_max_pages_used(self, **kwargs):
pass
@prom
def mysql_global_status_tc_log_page_size(self, **kwargs):
pass
@prom
def mysql_global_status_tc_log_page_waits(self, **kwargs):
pass
@prom
def mysql_global_status_threads_cached(self, **kwargs):
pass
@prom
def mysql_global_status_threads_connected(self, **kwargs):
pass
@prom
def mysql_global_status_threads_created(self, **kwargs):
pass
@prom
def mysql_global_status_threads_running(self, **kwargs):
pass
@prom
def mysql_global_status_uptime(self, **kwargs):
pass
@prom
def mysql_global_status_uptime_since_flush_status(self, **kwargs):
pass
@prom
def mysql_global_variables_auto_increment_increment(self, **kwargs):
pass
@prom
def mysql_global_variables_auto_increment_offset(self, **kwargs):
pass
@prom
def mysql_global_variables_autocommit(self, **kwargs):
pass
@prom
def mysql_global_variables_automatic_sp_privileges(self, **kwargs):
pass
@prom
def mysql_global_variables_avoid_temporal_upgrade(self, **kwargs):
pass
@prom
def mysql_global_variables_back_log(self, **kwargs):
pass
@prom
def mysql_global_variables_big_tables(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_cache_size(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_direct_non_transactional_updates(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_group_commit_sync_delay(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_group_commit_sync_no_delay_count(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_gtid_simple_recovery(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_max_flush_queue_time(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_order_commits(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_rows_query_log_events(self, **kwargs):
pass
@prom
def mysql_global_variables_binlog_stmt_cache_size(self, **kwargs):
pass
@prom
def mysql_global_variables_bulk_insert_buffer_size(self, **kwargs):
pass
@prom
def mysql_global_variables_check_proxy_users(self, **kwargs):
pass
@prom
def mysql_global_variables_connect_timeout(self, **kwargs):
pass
@prom
def mysql_global_variables_core_file(self, **kwargs):
pass
@prom
def mysql_global_variables_default_password_lifetime(self, **kwargs):
pass
@prom
def mysql_global_variables_default_week_format(self, **kwargs):
pass
@prom
def mysql_global_variables_delay_key_write(self, **kwargs):
pass
@prom
def mysql_global_variables_delayed_insert_limit(self, **kwargs):
pass
@prom
def mysql_global_variables_delayed_insert_timeout(self, **kwargs):
pass
@prom
def mysql_global_variables_delayed_queue_size(self, **kwargs):
pass
@prom
def mysql_global_variables_disconnect_on_expired_password(self, **kwargs):
pass
@prom
def mysql_global_variables_div_precision_increment(self, **kwargs):
pass
@prom
def mysql_global_variables_end_markers_in_json(self, **kwargs):
pass
@prom
def mysql_global_variables_enforce_gtid_consistency(self, **kwargs):
pass
@prom
def mysql_global_variables_eq_range_index_dive_limit(self, **kwargs):
pass
@prom
def mysql_global_variables_event_scheduler(self, **kwargs):
pass
@prom
def mysql_global_variables_expire_logs_days(self, **kwargs):
pass
@prom
def mysql_global_variables_explicit_defaults_for_timestamp(self, **kwargs):
pass
@prom
def mysql_global_variables_flush(self, **kwargs):
pass
@prom
def mysql_global_variables_flush_time(self, **kwargs):
pass
@prom
def mysql_global_variables_foreign_key_checks(self, **kwargs):
pass
@prom
def mysql_global_variables_ft_max_word_len(self, **kwargs):
pass
@prom
def mysql_global_variables_ft_min_word_len(self, **kwargs):
pass
@prom
def mysql_global_variables_ft_query_expansion_limit(self, **kwargs):
pass
@prom
def mysql_global_variables_general_log(self, **kwargs):
pass
@prom
def mysql_global_variables_group_concat_max_len(self, **kwargs):
pass
@prom
def mysql_global_variables_gtid_executed_compression_period(self, **kwargs):
pass
@prom
def mysql_global_variables_gtid_mode(self, **kwargs):
pass
@prom
def mysql_global_variables_host_cache_size(self, **kwargs):
pass
@prom
def mysql_global_variables_ignore_builtin_innodb(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_adaptive_flushing(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_adaptive_flushing_lwm(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_adaptive_hash_index(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_adaptive_hash_index_parts(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_adaptive_max_sleep_delay(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_api_bk_commit_interval(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_api_disable_rowlock(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_api_enable_binlog(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_api_enable_mdl(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_api_trx_level(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_autoextend_increment(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_autoinc_lock_mode(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_chunk_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_dump_at_shutdown(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_dump_now(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_dump_pct(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_instances(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_load_abort(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_load_at_startup(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_load_now(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_buffer_pool_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_change_buffer_max_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_checksums(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_cmp_per_index_enabled(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_commit_concurrency(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_compression_failure_threshold_pct(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_compression_level(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_compression_pad_pct_max(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_concurrency_tickets(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_deadlock_detect(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_disable_sort_file_cache(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_doublewrite(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_fast_shutdown(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_file_format_check(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_file_per_table(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_fill_factor(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_flush_log_at_timeout(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_flush_log_at_trx_commit(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_flush_neighbors(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_flush_sync(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_flushing_avg_loops(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_force_load_corrupted(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_force_recovery(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_cache_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_enable_diag_print(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_enable_stopword(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_max_token_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_min_token_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_num_word_optimize(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_result_cache_limit(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_sort_pll_degree(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_ft_total_cache_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_io_capacity(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_io_capacity_max(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_large_prefix(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_lock_wait_timeout(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_locks_unsafe_for_binlog(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_log_buffer_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_log_checksums(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_log_compressed_pages(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_log_file_size(self, **kwargs):
pass
@prom
def mysql_global_variables_innodb_log_files_in_group(self, | |
#!/usr/bin/env python
# System
import sys
from subprocess import call
import string
import os
import pwd
import grp
import json
from distutils.util import strtobool
import re
# Third Party
import configargparse
class Installer:
FLAVOR_VANILLA = "vanilla"
FLAVOR_DCOS = "dcos"
def main(self):
# Handle arguments
self.parse_arguments()
self.default_arguments()
self.process_arguments()
# Make sure the Weave executable was downloaded
if not os.path.exists("weave"):
raise ValueError("Weave executable has not been downloaded yet. Use 'make setup' to get it.")
# Make sure the Weave Scope executable was downloaded
if not os.path.exists("weave-scope"):
raise ValueError("Weave Scope executable has not been downloaded yet. Use 'make setup' to get it.")
# Do the deed
self.install()
def parse_arguments(self):
# Create an argument parser
self.parser = configargparse.ArgumentParser(description='Install Weave to a Mesos cluster')
# Add arguments to the parser
self.add_common_arguments()
self.add_mesos_arguments()
self.add_weave_arguments()
# Parse arguments out of the command line
self.args = self.parser.parse_args()
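        # With configargparse, every option can come either from its CLI flag or from the
        # listed env_var, e.g. (hypothetical invocation):
        #   MESOS_FLAVOR=dcos python installer.py --mesos-private-slaves 10.0.0.1,10.0.0.2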
def add_common_arguments(self):
# domain
self.parser.add_argument(
"--domain",
dest="domain",
env_var='WIM_DOMAIN',
help="The name to use for DNS names assigned to containers. If you override the default, be sure to set your container hostnames to match. (Weave default: weave.local)"
)
# Local temporary directory
self.parser.add_argument(
"--local-tmp-dir",
dest="local_tmp_dir",
env_var='WIM_TMP_DIR',
default="/tmp",
help="Path for a local temporary directory. (default: '%(default)s')"
)
# Skip warnings?
self.parser.add_argument(
"--skip-warnings",
dest="skip_warnings",
env_var='WIM_SKIP_WARNINGS',
type=str,
default="False",
help="Skip warnings about proceeding with installation at various points. (default: '%(default)s')"
)
def add_mesos_arguments(self):
mesos_group = self.parser.add_argument_group('mesos', 'Mesos')
# Flavor
mesos_group.add_argument(
"--mesos-flavor",
dest="mesos_flavor",
env_var='MESOS_FLAVOR',
choices=[Installer.FLAVOR_VANILLA, Installer.FLAVOR_DCOS],
default=Installer.FLAVOR_VANILLA,
help="The 'flavor' of Mesos to install into. Determines several default values. (default: '%(default)s')"
)
# Nodes
mesos_group.add_argument(
"--mesos-public-slaves",
dest="mesos_public_slaves",
env_var='MESOS_PUBLIC_SLAVES',
type=str,
help="List of addresses of public Mesos slave nodes. Delimited by commas, colons, semicolons, pipes, or whitespace."
)
mesos_group.add_argument(
"--mesos-private-slaves",
dest="mesos_private_slaves",
env_var='MESOS_PRIVATE_SLAVES',
type=str,
help="List of addresses of private Mesos slave nodes. Delimited by commas, colons, semicolons, pipes, or whitespace."
)
# Admin username
mesos_group.add_argument(
"--mesos-admin-username",
dest="mesos_admin_username",
env_var='MESOS_ADMIN_USERNAME',
help="Admin username for Mesos nodes. (default: Determined by 'flavor')"
)
# Service names
mesos_group.add_argument(
"--mesos-slave-service-name-public",
dest="mesos_slave_service_name_public",
env_var='MESOS_SLAVE_SERVICE_NAME_PUBLIC',
help="Name of Mesos public slave systemd service. (default: Determined by 'flavor')"
)
mesos_group.add_argument(
"--mesos-slave-service-name-private",
dest="mesos_slave_service_name_private",
env_var='MESOS_SLAVE_SERVICE_NAME_PRIVATE',
help="Name of Mesos private slave systemd service. (default: Determined by 'flavor')"
)
# Executor environment file
mesos_group.add_argument(
"--mesos-slave-executor-env-file",
dest="mesos_slave_executor_env_file",
env_var='MESOS_SLAVE_EXECUTOR_ENV_FILE',
help="Path for the Mesos executor environment config file. (default: Determined by 'flavor')"
)
def add_weave_arguments(self):
weave_group = self.parser.add_argument_group('weave', 'Weave')
# Installation directory
weave_group.add_argument(
"--weave-install-dir",
dest="weave_install_dir",
env_var='WEAVE_INSTALL_DIR',
default=None,
help="The directory in which to install Weave. (default: /home/<mesos_admin_username>)"
)
# weave-with-router/weave-without-router
with_router = weave_group.add_mutually_exclusive_group(required=False)
with_router.add_argument(
'--weave-with-router',
dest='weave_with_router',
env_var='WEAVE_WITH_ROUTER',
action='store_true',
help="Install the Weave router."
)
with_router.add_argument(
'--weave-without-router',
dest='weave_with_router',
env_var='WEAVE_WITHOUT_ROUTER',
action='store_false',
help="Do not install the Weave router."
)
with_router.set_defaults(weave_with_router=True)
# weave-with-proxy/weave-without-proxy
with_proxy = weave_group.add_mutually_exclusive_group(required=False)
with_proxy.add_argument(
'--weave-with-proxy',
dest='weave_with_proxy',
env_var='WEAVE_WITH_PROXY',
action='store_true',
help="Install the Weave proxy."
)
with_proxy.add_argument(
'--weave-without-proxy',
dest='weave_with_proxy',
env_var='WEAVE_WITHOUT_PROXY',
action='store_false',
help="Do not install the Weave proxy."
)
with_proxy.set_defaults(weave_with_proxy=True)
# weave-with-scope/weave-without-scope
with_scope = weave_group.add_mutually_exclusive_group(required=False)
with_scope.add_argument(
'--weave-with-scope',
dest='weave_with_scope',
env_var='WEAVE_WITH_SCOPE',
action='store_true',
help="Install the Weave scope."
)
with_scope.add_argument(
'--weave-without-scope',
dest='weave_with_scope',
env_var='WEAVE_WITHOUT_SCOPE',
action='store_false',
help="Do not install the Weave scope."
)
with_scope.set_defaults(weave_with_scope=True)
# Components
self.add_weave_router_arguments()
self.add_weave_proxy_arguments()
self.add_weave_scope_arguments()
def add_weave_router_arguments(self):
weave_router_group = self.parser.add_argument_group('weave-router', 'Weave Router')
# ipalloc-range
weave_router_group.add_argument(
"--weave-router-ipalloc-range",
dest="weave_router_ipalloc_range",
env_var='WEAVE_ROUTER_IPALLOC_RANGE',
help="The range of IP numbers for Weave network nodes in CIDR form. (Weave default: 10.32.0.0/12)"
)
# password
weave_router_group.add_argument(
"--weave-router-password",
dest="weave_router_password",
env_var='WEAVE_ROUTER_PASSWORD',
help="Router password"
)
# nickname
weave_router_group.add_argument(
"--weave-router-nickname",
dest="weave_router_nickname",
env_var='WEAVE_ROUTER_NICKNAME',
help="Router nickname"
)
# init-peer-count
weave_router_group.add_argument(
"--weave-router-init-peer-count",
dest="weave_router_init_peer_count",
env_var='WEAVE_ROUTER_INIT_PEER_COUNT',
help="Router initial peer count"
)
def add_weave_proxy_arguments(self):
weave_proxy_group = self.parser.add_argument_group('weave-proxy', 'Weave Proxy')
# Docker socket path
weave_proxy_group.add_argument(
"--weave-proxy-socket",
dest="weave_proxy_socket",
env_var='WEAVE_PROXY_SOCKET',
default="/var/run/weave/weave.sock",
help="The Weave proxy socket path. (default: %(default)s)"
)
# with-dns/without-dns
with_dns_parser = weave_proxy_group.add_mutually_exclusive_group(required=False)
with_dns_parser.add_argument(
'--weave-proxy-with-dns',
dest='weave_proxy_dns',
env_var='WEAVE_PROXY_WITH_DNS',
action='store_true',
help="Use Weave DNS."
)
with_dns_parser.add_argument(
'--weave-proxy-without-dns',
dest='weave_proxy_dns',
env_var='WEAVE_PROXY_WITHOUT_DNS',
action='store_false',
help="Do not use Weave DNS."
)
weave_proxy_group.set_defaults(weave_proxy_dns=True)
# hostname-from-label
weave_proxy_group.add_argument(
"--weave-proxy-hostname-from-label",
dest="weave_proxy_hostname_from_label",
env_var='WEAVE_PROXY_HOSTNAME_FROM_LABEL',
help="Hostname label."
)
# hostname-match
weave_proxy_group.add_argument(
"--weave-proxy-hostname-match",
dest="weave_proxy_hostname_match",
env_var='WEAVE_PROXY_HOSTNAME_MATCH',
help="Hostname match."
)
# hostname-replacement
weave_proxy_group.add_argument(
"--weave-proxy-hostname-replacement",
dest="weave_proxy_hostname_replacement",
env_var='WEAVE_PROXY_HOSTNAME_REPLACEMENT',
help="Hostname replacement."
)
def add_weave_scope_arguments(self):
# TODO: Add some options here
# weave_scope_group = self.parser.add_argument_group('weave-scope', 'Weave Scope')
pass
def is_valid_mesos_flavor(self, name):
if name == Installer.FLAVOR_VANILLA:
return True
if name == Installer.FLAVOR_DCOS:
return True
return False
def default_arguments(self):
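        """Fill in flavor-dependent defaults for any arguments the user did not supply."""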
# Vanilla flavor
if self.args.mesos_flavor == Installer.FLAVOR_VANILLA:
self.default_arguments_vanilla()
# DCOS flavor
elif self.args.mesos_flavor == Installer.FLAVOR_DCOS:
self.default_arguments_dcos()
# Let Weave installation directory default to the home directory of the Mesos admin user
if self.args.weave_install_dir is None:
self.args.weave_install_dir = "/home/" + self.args.mesos_admin_username
def default_arguments_vanilla(self):
raise Exception("Not yet implemented. Someone needs to determine the defaults for 'vanilla' Mesos flavor")
# if self.args.mesos_admin_username is None:
# self.args.mesos_admin_username = "TBD"
# if self.args.mesos_slave_service_name_public is None:
# self.args.mesos_slave_service_name_public = "TBD"
# if self.args.mesos_slave_service_name_private is None:
# self.args.mesos_slave_service_name_private = "TBD"
# if self.args.mesos_slave_executor_env_file is None:
# self.args.mesos_slave_executor_env_file = "TBD"
def default_arguments_dcos(self):
if self.args.mesos_admin_username is None:
self.args.mesos_admin_username = "core"
if self.args.mesos_slave_service_name_public is None:
self.args.mesos_slave_service_name_public = "dcos-mesos-slave-public.service"
if self.args.mesos_slave_service_name_private is None:
self.args.mesos_slave_service_name_private = "dcos-mesos-slave.service"
if self.args.mesos_slave_executor_env_file is None:
self.args.mesos_slave_executor_env_file = "/opt/mesosphere/etc/mesos-executor-environment.json"
def process_arguments(self):
# Parse slave node lists
self.mesos_public_slaves = parse_delimited_list(self.args.mesos_public_slaves)
self.mesos_private_slaves = parse_delimited_list(self.args.mesos_private_slaves)
# Make sure at least one slave node was specified
if (len(self.mesos_public_slaves) == 0 and len(self.mesos_private_slaves) == 0):
raise ValueError("You must specify at least one Mesos slave node using --mesos-public-slaves or --mesos_private_slaves")
# If either weave-proxy-hostname-match or weave-proxy-hostname-replacement were specified, make sure that both were
        weave_proxy_hostname_match_specified = (self.args.weave_proxy_hostname_match is not None)
        weave_proxy_hostname_replacement_specified = (self.args.weave_proxy_hostname_replacement is not None)
one_specified = (weave_proxy_hostname_match_specified or weave_proxy_hostname_replacement_specified)
both_specified = (weave_proxy_hostname_match_specified and weave_proxy_hostname_replacement_specified)
if one_specified and not both_specified:
raise ValueError("You must specify both --weave-proxy-hostname-match and --weave-proxy-hostname-replacement (or neither)")
# Validate the Mesos "flavor"
if not self.is_valid_mesos_flavor(self.args.mesos_flavor):
raise ValueError("Invalid mesos-flavor: " + self.args.mesos_flavor)
# Build directory paths for use later
self.weave_bin_dir = self.args.weave_install_dir + "/bin"
self.weave_tmp_dir = self.args.weave_install_dir + "/tmp"
# Append "." to DNS domain, if it's not already there
        if self.args.domain is not None and not self.args.domain.endswith("."):
self.args.domain += "."
# Map skip-warnings string to boolean
self.skip_warnings = is_truthy(self.args.skip_warnings)
# Build the Weave systemd service file substitution maps
self.build_weave_router_substitutions()
self.build_weave_proxy_substitutions()
self.build_weave_scope_substitutions()
def build_weave_router_substitutions(self):
substitutions = []
self.append_substitution(substitutions, "{{BIN_DIR}}", self.weave_bin_dir)
weave_router_peers = ' '.join(self.mesos_private_slaves + self.mesos_public_slaves)
self.append_substitution(substitutions, "{{PEERS}}", weave_router_peers)
self.append_substitution(substitutions, "{{IPALLOC_RANGE}}", self.args.weave_router_ipalloc_range, option="--ipalloc-range")
self.append_substitution(substitutions, "{{DNS_DOMAIN}}", self.args.domain, option="--dns-domain")
self.append_substitution(substitutions, "{{PASSWORD}}", self.args.weave_router_password, option="--password")
self.append_substitution(substitutions, "{{NICKNAME}}", self.args.weave_router_nickname, option="--nickname")
self.append_substitution(substitutions, "{{INIT_PEER_COUNT}}", self.args.weave_router_init_peer_count, option="--init-peer-count")
self.weave_router_substitutions = substitutions
def build_weave_proxy_substitutions(self):
substitutions = []
self.append_substitution(substitutions, "{{BIN_DIR}}", self.weave_bin_dir)
if self.args.weave_proxy_dns:
value = "--with-dns"
else:
value = "--without-dns"
self.append_substitution(substitutions, "{{WITH_DNS}}", value)
self.append_substitution(substitutions, "{{HOSTNAME_FROM_LABEL}}", self.args.weave_proxy_hostname_from_label, option="--hostname-from-label")
self.append_substitution(substitutions, "{{HOSTNAME_MATCH}}", self.args.weave_proxy_hostname_match, option="--hostname-match")
self.append_substitution(substitutions, "{{HOSTNAME_REPLACEMENT}}", self.args.weave_proxy_hostname_replacement, option="--hostname-replacement")
self.weave_proxy_substitutions = substitutions
def build_weave_scope_substitutions(self):
substitutions = []
self.append_substitution(substitutions, "{{BIN_DIR}}", self.weave_bin_dir)
self.weave_scope_substitutions = substitutions
    def append_substitution(self, substitutions, pattern, value, **kwargs):
        """Append a pattern/replacement pair, prefixing the value with its command-line option when one is given."""
if value is None:
replacement = ""
elif 'option' in kwargs:
replacement = kwargs['option'] + " " + value
else:
replacement = value
        substitutions.append({'pattern': pattern, 'replacement': replacement})
def install(self):
# Install to public Mesos slaves
for slave in self.mesos_public_slaves:
self.install_into_slave(slave, is_public=True)
# Install to private Mesos slaves
for slave in self.mesos_private_slaves:
self.install_into_slave(slave, is_public=False)
def install_into_slave(self, slave, is_public=False):
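        """Install the Weave binaries, systemd services, and Mesos executor configuration onto a single slave node."""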
print "------------------------------------------------------------------"
print "Installing Weave into Mesos slave: " + slave
# Make sure target directories exist
self.execute_remotely(slave, "sudo install -d " + self.weave_tmp_dir)
self.execute_remotely(slave, "sudo install -d " + self.weave_bin_dir)
# Install Weave executable
self.copy_file_local_to_remote(
slave,
"./weave",
self.weave_bin_dir + "/",
mode=0755,
user="root", group="root"
)
        # Define a string to which the install_and_start_service() calls below will append service file names
self.service_file_list = ""
# Install Weave router service
if self.args.weave_with_router:
self.install_and_start_service(slave, "router", substitutions=self.weave_router_substitutions)
# Install weave proxy socket into Mesos slave
# TODO: See issue: https://github.com/TrentBrown/weave-into-mesos/issues/1
key = "DOCKER_HOST"
value = "unix://" + self.args.weave_proxy_socket
self.add_property_to_remote_json_file(
slave,
self.args.mesos_slave_executor_env_file,
key, value,
mode=0644,
user="root", group="root"
)
# Install Weave proxy service
if self.args.weave_with_proxy:
self.install_and_start_service(slave, "proxy", substitutions=self.weave_proxy_substitutions)
# Install Weave scope service
if self.args.weave_with_scope:
# Install executable
self.copy_file_local_to_remote(
slave,
"./weave-scope",
self.weave_bin_dir + "/",
mode=0755,
user="root", group="root"
)
self.install_and_start_service(slave, "scope", substitutions=self.weave_scope_substitutions)
# Install Weave service target file
target_substitutions = []
self.append_substitution(target_substitutions, "{{SERVICE_FILE_LIST}}", self.service_file_list)
self.copy_file_local_to_remote(
slave,
"./weave.target",
"/etc/systemd/system/",
mode=0644,
user="root", group="root",
substitutions=target_substitutions
)
# Restart the Mesos slave so it picks up the new configuration
# TODO: Is there a more graceful way to do this? Currently orphans containers running under Marathon (eg. Chronos).
# TODO: This looks relevant: https://issues.apache.org/jira/browse/MESOS-1474
if not self.proceed("Are you sure you want to restart Mesos slave " + slave + | |
# src/config/vnc_openstack/vnc_openstack/tests/test_firewall.py
# Copyright 2018 Juniper Networks. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import uuid
from gevent import monkey
monkey.patch_all() # noqa
from mock import patch
from neutron_lib import constants
from vnc_api.exceptions import BadRequest
from vnc_api.exceptions import NoIdError
from vnc_api.vnc_api import ApplicationPolicySet
from vnc_api.vnc_api import FirewallPolicy
from vnc_api.vnc_api import FirewallRule
from vnc_api.vnc_api import FirewallSequence
from vnc_api.vnc_api import FirewallServiceType
from vnc_api.vnc_api import PortType
from vnc_api.vnc_api import Project
from vnc_api.vnc_api import VirtualMachineInterface
from vnc_api.vnc_api import VirtualNetwork
from tests import test_case
from vnc_openstack.neutron_plugin_db import\
_NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME
from vnc_openstack.neutron_plugin_db import\
_NEUTRON_FIREWALL_DEFAULT_IPV4_RULE_NAME
from vnc_openstack.neutron_plugin_db import\
_NEUTRON_FIREWALL_DEFAULT_IPV6_RULE_NAME
from vnc_openstack.neutron_plugin_db import _NEUTRON_FWAAS_TAG_TYPE
class TestFirewallBase(test_case.NeutronBackendTestCase):
@classmethod
def setUpClass(cls):
super(TestFirewallBase, cls).setUpClass(
extra_config_knobs=[('NEUTRON', 'fwaas_enabled', True)])
def setUp(self):
super(TestFirewallBase, self).setUp()
self.project_id = self._vnc_lib.project_create(
Project('project-%s' % self.id()))
self.project = self._vnc_lib.project_read(id=self.project_id)
def _insert_rule(self, project_id, firewall_policy_id, firewall_rule_id,
insert_before=None, insert_after=None):
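        """Helper: add a firewall rule to a firewall policy via the Neutron INSERT_RULE operation, optionally positioned before or after an existing rule."""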
extra_res_fields = {
'firewall_rule_id': firewall_rule_id,
}
if insert_before:
extra_res_fields['insert_before'] = insert_before
elif insert_after:
extra_res_fields['insert_after'] = insert_after
return self.update_resource(
'firewall_policy',
firewall_policy_id,
project_id,
extra_res_fields=extra_res_fields,
operation='INSERT_RULE')
def _remove_rule(self, project_id, firewall_policy_id, firewall_rule_id):
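        """Helper: remove a firewall rule from a firewall policy via the Neutron REMOVE_RULE operation."""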
extra_res_fields = {
'firewall_rule_id': firewall_rule_id,
}
return self.update_resource(
'firewall_policy',
firewall_policy_id,
project_id,
extra_res_fields=extra_res_fields,
operation='REMOVE_RULE')
def _get_tag_fq_name(self, firewall_group, project=None):
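        """Helper: return the FQ name of the dedicated Neutron FWaaS tag for a firewall group (the project's FQ name plus '<tag type>=<firewall group id>')."""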
if (not project and 'project_id' not in firewall_group and
'tenant_id' not in firewall_group):
return
if not project:
project_id = str(uuid.UUID(firewall_group.get(
'project_id', firewall_group['tenant_id'])))
project = self._vnc_lib.project_read(id=project_id)
return project.fq_name + [
'%s=%s' % (_NEUTRON_FWAAS_TAG_TYPE, firewall_group['id'])]
class TestFirewallGroup(TestFirewallBase):
def test_dedicated_tag_created(self):
neutron_fg = self.create_resource('firewall_group', self.project_id)
tag_fq_name = self._get_tag_fq_name(neutron_fg, self.project)
try:
tag = self._vnc_lib.tag_read(tag_fq_name)
except NoIdError:
msg = ("Dedicated Tag %s for firewall group %s was not created" %
(':'.join(tag_fq_name), neutron_fg['id']))
self.fail(msg)
aps_backrefs = tag.get_application_policy_set_back_refs() or []
        self.assertEquals(len(aps_backrefs), 1)
def test_dedicated_tag_and_refs_deleted(self):
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=self.project)
self._vnc_lib.virtual_network_create(vn)
vmi_ids = []
for i in range(3):
vmi = VirtualMachineInterface(
'%s-vmi%d' % (self.id(), i), parent_obj=self.project)
vmi.add_virtual_network(vn)
vmi_ids.append(self._vnc_lib.virtual_machine_interface_create(vmi))
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'ports': vmi_ids,
},
)
tag_fq_name = self._get_tag_fq_name(neutron_fg, self.project)
try:
self._vnc_lib.tag_read(tag_fq_name)
except NoIdError:
msg = ("Dedicated Tag %s for firewall group %s was not created" %
(':'.join(tag_fq_name), neutron_fg['id']))
self.fail(msg)
self.delete_resource('firewall_group', self.project_id,
neutron_fg['id'])
self.assertRaises(NoIdError, self._vnc_lib.tag_read, tag_fq_name)
def test_aps_cleaned_if_create_tag_fails(self):
with patch.object(self.neutron_db_obj._vnc_lib, 'tag_create',
side_effect=BadRequest(400, "Fake bad request")):
self.create_resource(
'firewall_group', self.project_id, status="400 Bad Request")
tags = self._vnc_lib.tags_list(parent_id=self.project_id)['tags']
# Only dedicated tag for Neutron FWaaSv2 default firewall group remains
self.assertEquals(len(tags), 1)
apss = self._vnc_lib.application_policy_sets_list(
parent_id=self.project_id)['application-policy-sets']
# Only default Contrail project APS and Neutron FWaaSv2 default
# firewall group remains
self.assertEquals(len(apss), 2)
self.assertEquals(
{r['fq_name'][-1] for r in apss},
set([ApplicationPolicySet(parent_type='project').name,
_NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME]),
)
def test_aps_cleaned_if_associate_tag_fails(self):
with patch.object(self.neutron_db_obj._vnc_lib, 'set_tag',
side_effect=BadRequest(400, "Fake bad request")):
self.create_resource(
'firewall_group', self.project_id, status='400 Bad Request')
tags = self._vnc_lib.tags_list(parent_id=self.project_id)['tags']
# Only dedicated tag for Neutron FWaaSv2 default firewall group remains
self.assertEquals(len(tags), 1)
apss = self._vnc_lib.application_policy_sets_list(
parent_id=self.project_id)['application-policy-sets']
# Only default Contrail project APS and Neutron FWaaSv2 default
# firewall group remains
self.assertEquals(len(apss), 2)
self.assertEquals(
{r['fq_name'][-1] for r in apss},
set([ApplicationPolicySet(parent_type='project').name,
_NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME]),
)
def test_ingress_policy_set_to_egress(self):
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'ingress_firewall_policy_id': fp.uuid,
},
)
self.assertEquals(neutron_fg['ingress_firewall_policy_id'], fp.uuid)
self.assertEquals(neutron_fg['egress_firewall_policy_id'], fp.uuid)
def test_egress_policy_set_to_ingress(self):
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'egress_firewall_policy_id': fp.uuid,
},
)
self.assertEquals(neutron_fg['ingress_firewall_policy_id'], fp.uuid)
self.assertEquals(neutron_fg['egress_firewall_policy_id'], fp.uuid)
def test_can_set_same_egress_and_ingress_policies(self):
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'ingress_firewall_policy_id': fp.uuid,
'egress_firewall_policy_id': fp.uuid,
},
)
self.assertEquals(neutron_fg['ingress_firewall_policy_id'], fp.uuid)
self.assertEquals(neutron_fg['egress_firewall_policy_id'], fp.uuid)
def test_cannot_set_different_egress_and_ingress_policies(self):
fp1 = FirewallPolicy('%s-fp1' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp1)
fp2 = FirewallPolicy('%s-fp2' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp2)
self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'ingress_firewall_policy_id': fp1.uuid,
'egress_firewall_policy_id': fp2.uuid,
},
status="400 Bad Request",
)
def test_egress_and_ingress_policies_remove_if_ingress_deleted(self):
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'egress_firewall_policy_id': fp.uuid,
},
)
self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'ingress_firewall_policy_id': None,
},
)
neutron_fg = self.read_resource('firewall_group', neutron_fg['id'])
self.assertNotIn('ingress_firewall_policy_id', neutron_fg)
self.assertNotIn('egress_firewall_policy_id', neutron_fg)
def test_egress_and_ingress_policies_remove_if_egress_deleted(self):
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'egress_firewall_policy_id': fp.uuid,
},
)
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'egress_firewall_policy_id': None,
},
)
self.assertNotIn('ingress_firewall_policy_id', neutron_fg)
self.assertNotIn('egress_firewall_policy_id', neutron_fg)
def test_firewall_group_status(self):
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'admin_state_up': False,
},
)
self.assertEquals(neutron_fg['status'], constants.DOWN)
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'admin_state_up': True,
},
)
self.assertEquals(neutron_fg['status'], constants.INACTIVE)
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=self.project)
self._vnc_lib.virtual_network_create(vn)
vmi = VirtualMachineInterface(
'%s-vmi' % self.id(), parent_obj=self.project)
vmi.add_virtual_network(vn)
self._vnc_lib.virtual_machine_interface_create(vmi)
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'ports': [vmi.uuid],
},
)
self.assertEquals(neutron_fg['status'], constants.INACTIVE)
fp = FirewallPolicy('%s-fp' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp)
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'egress_firewall_policy_id': fp.uuid,
},
)
self.assertEquals(neutron_fg['status'], constants.ACTIVE)
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'ports': [],
},
)
self.assertEquals(neutron_fg['status'], constants.INACTIVE)
def test_remove_extra_fp_refs(self):
neutron_fg = self.create_resource('firewall_group', self.project_id)
aps = self._vnc_lib.application_policy_set_read(id=neutron_fg['id'])
fp1 = FirewallPolicy('%s-fp1' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp1)
fp2 = FirewallPolicy('%s-fp2' % self.id(), parent_obj=self.project)
self._vnc_lib.firewall_policy_create(fp2)
aps.add_firewall_policy(fp1, FirewallSequence(sequence='0.0'))
aps.add_firewall_policy(fp2, FirewallSequence(sequence='1.0'))
self._vnc_lib.application_policy_set_update(aps)
neutron_fg = self.read_resource('firewall_group', neutron_fg['id'])
self.assertEquals(neutron_fg['ingress_firewall_policy_id'], fp1.uuid)
self.assertEquals(neutron_fg['egress_firewall_policy_id'], fp1.uuid)
aps = self._vnc_lib.application_policy_set_read(id=neutron_fg['id'])
fp_refs = aps.get_firewall_policy_refs() or []
self.assertEquals(len(fp_refs), 1)
self.assertEquals(fp_refs[0]['uuid'], fp1.uuid)
def test_firewall_group_port_association(self):
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=self.project)
self._vnc_lib.virtual_network_create(vn)
vmi_ids = []
for i in range(3):
vmi = VirtualMachineInterface(
'%s-vmi%d' % (self.id(), i), parent_obj=self.project)
vmi.add_virtual_network(vn)
vmi_ids.append(self._vnc_lib.virtual_machine_interface_create(vmi))
neutron_fg = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'ports': vmi_ids[:-1],
},
)
self.assertEquals(set(neutron_fg['ports']), set(vmi_ids[:-1]))
neutron_fg = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'ports': vmi_ids[1:],
},
)
self.assertEquals(set(neutron_fg['ports']), set(vmi_ids[1:]))
def test_multiple_firewall_group_port_association(self):
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=self.project)
self._vnc_lib.virtual_network_create(vn)
vmi = VirtualMachineInterface('%s-vmi' % self.id(),
parent_obj=self.project)
vmi.add_virtual_network(vn)
vmi_id = self._vnc_lib.virtual_machine_interface_create(vmi)
neutron_fg1 = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'name': '%s-fg1' % self.id(),
'ports': [vmi_id],
},
)
neutron_fg2 = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'name': '%s-fg2' % self.id(),
},
)
tag1_fq_name = self._get_tag_fq_name(neutron_fg1, self.project)
tag2_fq_name = self._get_tag_fq_name(neutron_fg2, self.project)
self.assertEquals(set(neutron_fg1['ports']), set([vmi_id]))
vmi = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
tag_refs = [r['to'] for r in vmi.get_tag_refs() or []]
self.assertEquals(len(tag_refs), 1)
self.assertEquals(tag1_fq_name, tag_refs[0])
neutron_fg2 = self.update_resource(
'firewall_group',
neutron_fg2['id'],
self.project_id,
extra_res_fields={
'ports': [vmi_id],
},
)
neutron_fg1 = self.read_resource('firewall_group', neutron_fg1['id'])
self.assertEquals(set(neutron_fg1['ports']), set([vmi_id]))
self.assertEquals(set(neutron_fg2['ports']), set([vmi_id]))
vmi = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
tag_refs = [r['to'] for r in vmi.get_tag_refs() or []]
self.assertEquals(len(tag_refs), 2)
self.assertIn(tag1_fq_name, tag_refs)
self.assertIn(tag2_fq_name, tag_refs)
neutron_fg1 = self.update_resource(
'firewall_group',
neutron_fg1['id'],
self.project_id,
extra_res_fields={
'ports': [],
},
)
neutron_fg2 = self.read_resource('firewall_group', neutron_fg2['id'])
self.assertFalse(neutron_fg1['ports'])
self.assertEquals(set(neutron_fg2['ports']), set([vmi_id]))
vmi = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
tag_refs = [r['to'] for r in vmi.get_tag_refs() or []]
self.assertEquals(len(tag_refs), 1)
self.assertEquals(tag2_fq_name, tag_refs[0])
def test_list_firewall_group(self):
vn = VirtualNetwork('%s-vn' % self.id(), parent_obj=self.project)
self._vnc_lib.virtual_network_create(vn)
neutron_fgs = []
fp_ids = []
vmi_ids = []
for i in range(2):
fp = FirewallPolicy('%s-fp%d' % (self.id(), i),
parent_obj=self.project)
fp_ids.append(self._vnc_lib.firewall_policy_create(fp))
vmi = VirtualMachineInterface(
'%s-vmi%d' % (self.id(), i), parent_obj=self.project)
vmi.add_virtual_network(vn)
vmi_ids.append(self._vnc_lib.virtual_machine_interface_create(vmi))
neutron_fgs.append(
self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'name': '%s-fg%d' % (self.id(), i),
'ingress_firewall_policy_id': fp.uuid,
'ports': [vmi.uuid],
},
),
)
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'ingress_firewall_policy_id': fp_ids,
},
)
self.assertEquals(len(list_result), len(neutron_fgs))
self.assertEquals({r['id'] for r in list_result},
{r['id'] for r in neutron_fgs})
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'egress_firewall_policy_id': fp_ids,
},
)
self.assertEquals(len(list_result), len(neutron_fgs))
self.assertEquals({r['id'] for r in list_result},
{r['id'] for r in neutron_fgs})
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'ports': vmi_ids,
},
)
self.assertEquals(len(list_result), len(neutron_fgs))
self.assertEquals({r['id'] for r in list_result},
{r['id'] for r in neutron_fgs})
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'ingress_firewall_policy_id': [fp_ids[0]],
},
)
self.assertEquals(len(list_result), 1)
self.assertEquals(list_result[0], neutron_fgs[0])
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'egress_firewall_policy_id': [fp_ids[1]],
},
)
self.assertEquals(len(list_result), 1)
self.assertEquals(list_result[0], neutron_fgs[1])
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'ports': [vmi_ids[0]],
},
)
self.assertEquals(len(list_result), 1)
self.assertEquals(list_result[0], neutron_fgs[0])
def test_default_firewall_group_exists(self):
list_result = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'name': _NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME,
},
)
self.assertEquals(len(list_result), 1)
self.assertEquals(list_result[0]['name'],
_NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME)
def test_cannot_create_firewall_group_with_default_name(self):
resp = self.create_resource(
'firewall_group',
self.project_id,
extra_res_fields={
'name': _NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME,
},
status="400 Bad Request",
)
self.assertEquals(resp['exception'],
'FirewallGroupDefaultAlreadyExists')
def test_cannot_update_firewall_group_with_default_name(self):
neutron_fg = self.create_resource('firewall_group', self.project_id)
resp = self.update_resource(
'firewall_group',
neutron_fg['id'],
self.project_id,
extra_res_fields={
'name': _NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME,
},
status="400 Bad Request",
)
self.assertEquals(resp['exception'],
'FirewallGroupDefaultAlreadyExists')
def test_cannot_update_default_firewall_group(self):
neutron_default_fg = self.list_resource(
'firewall_group',
self.project_id,
req_filters={
'name': _NEUTRON_FIREWALL_DEFAULT_GROUP_POLICY_NAME,
},
)[0]
fp_uuid = self._vnc_lib.firewall_policy_create(FirewallPolicy(
'%s-fp' % self.id(), parent_obj=self.project))
attrs = {
'name': 'fake name',
'description': 'fake description',
'admin_state_up': False,
'ingress_firewall_policy_id': fp_uuid,
'egress_firewall_policy_id': fp_uuid,
}
for attr, value in attrs.items():
resp = self.update_resource(
'firewall_group',
neutron_default_fg['id'],
self.project_id,
extra_res_fields={
attr: value,
},
status="400 Bad Request",
)
self.assertEquals(resp['exception'],
'FirewallGroupCannotUpdateDefault')
# admin can update default firewall group but not the name
attrs.pop('name')
for attr, value in attrs.items():
self.update_resource(
'firewall_group',
neutron_default_fg['id'],
self.project_id,
extra_res_fields={
attr: value,
},
| |
already removed')
# Kafka endpoint functions
kafka_server = 'localhost'
class KafkaReceiver(object):
"""class for receiving and storing messages on a topic from the kafka broker"""
def __init__(self, topic, security_type):
from kafka import KafkaConsumer
remaining_retries = 10
port = 9092
if security_type != 'PLAINTEXT':
security_type = 'SSL'
port = 9093
while remaining_retries > 0:
try:
self.consumer = KafkaConsumer(topic, bootstrap_servers = kafka_server+':'+str(port), security_protocol=security_type)
print('Kafka consumer created on topic: '+topic)
break
except Exception as error:
remaining_retries -= 1
print('failed to connect to kafka (remaining retries '
+ str(remaining_retries) + '): ' + str(error))
time.sleep(1)
if remaining_retries == 0:
raise Exception('failed to connect to kafka - no retries left')
self.events = []
self.topic = topic
self.stop = False
def verify_s3_events(self, keys, exact_match=False, deletions=False):
"""verify stored s3 records agains a list of keys"""
verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions)
self.events = []
def kafka_receiver_thread_runner(receiver):
"""main thread function for the kafka receiver"""
try:
log.info('Kafka receiver started')
print('Kafka receiver started')
while not receiver.stop:
for msg in receiver.consumer:
receiver.events.append(json.loads(msg.value))
            time.sleep(0.1)
log.info('Kafka receiver ended')
print('Kafka receiver ended')
except Exception as error:
log.info('Kafka receiver ended unexpectedly: %s', str(error))
print('Kafka receiver ended unexpectedly: ' + str(error))
def create_kafka_receiver_thread(topic, security_type='PLAINTEXT'):
"""create kafka receiver and thread"""
receiver = KafkaReceiver(topic, security_type)
task = threading.Thread(target=kafka_receiver_thread_runner, args=(receiver,))
task.daemon = True
return task, receiver
def stop_kafka_receiver(receiver, task):
"""stop the receiver thread and wait for it to finis"""
receiver.stop = True
task.join(1)
try:
receiver.consumer.close()
except Exception as error:
        log.info('failed to gracefully stop Kafka receiver: %s', str(error))
# follow the instruction here to create and sign a broker certificate:
# https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka
# the generated broker certificate should be stored in the java keystore for the use of the server
# assuming the jks files were copied to $KAFKA_DIR and broker name is "localhost"
# following lines must be added to $KAFKA_DIR/config/server.properties
# listeners=PLAINTEXT://localhost:9092,SSL://localhost:9093,SASL_SSL://localhost:9094
# sasl.enabled.mechanisms=PLAIN
# ssl.keystore.location = $KAFKA_DIR/server.keystore.jks
# ssl.keystore.password = <PASSWORD>
# ssl.key.password = <PASSWORD>
# ssl.truststore.location = $KAFKA_DIR/server.truststore.jks
# ssl.truststore.password = <PASSWORD>
# notes:
# (1) we don't test client authentication, hence no need to generate client keys
# (2) our client is not using the keystore, and the "rootCA.crt" file generated in the process above
# should be copied to: $KAFKA_DIR
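# The sketch below is illustrative only (not used by the tests in this file): assuming
# kafka-python is installed and the "rootCA.crt" generated by the procedure above was
# copied into kafka_dir, a consumer that talks to the SSL listener on port 9093 and
# verifies the broker certificate could look like this.
def example_ssl_consumer(topic, kafka_dir):
    """Minimal sketch: create a KafkaConsumer over SSL, verifying the broker cert with rootCA.crt."""
    from kafka import KafkaConsumer
    return KafkaConsumer(topic,
                         bootstrap_servers=kafka_server + ':9093',
                         security_protocol='SSL',
                         ssl_cafile=kafka_dir + 'rootCA.crt')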
def init_kafka():
""" start kafka/zookeeper """
try:
KAFKA_DIR = os.environ['KAFKA_DIR']
    except KeyError:
KAFKA_DIR = ''
if KAFKA_DIR == '':
log.info('KAFKA_DIR must be set to where kafka is installed')
print('KAFKA_DIR must be set to where kafka is installed')
return None, None, None
DEVNULL = open(os.devnull, 'wb')
print('\nStarting zookeeper...')
try:
zk_proc = subprocess.Popen([KAFKA_DIR+'bin/zookeeper-server-start.sh', KAFKA_DIR+'config/zookeeper.properties'], stdout=DEVNULL)
except Exception as error:
log.info('failed to execute zookeeper: %s', str(error))
print('failed to execute zookeeper: %s' % str(error))
return None, None, None
time.sleep(5)
if zk_proc.poll() is not None:
print('zookeeper failed to start')
return None, None, None
print('Zookeeper started')
print('Starting kafka...')
kafka_log = open('./kafka.log', 'w')
try:
kafka_env = os.environ.copy()
kafka_env['KAFKA_OPTS']='-Djava.security.auth.login.config='+KAFKA_DIR+'config/kafka_server_jaas.conf'
kafka_proc = subprocess.Popen([
KAFKA_DIR+'bin/kafka-server-start.sh',
KAFKA_DIR+'config/server.properties'],
stdout=kafka_log,
env=kafka_env)
except Exception as error:
log.info('failed to execute kafka: %s', str(error))
print('failed to execute kafka: %s' % str(error))
zk_proc.terminate()
kafka_log.close()
return None, None, None
# TODO add kafka checkpoint instead of sleep
time.sleep(15)
if kafka_proc.poll() is not None:
zk_proc.terminate()
print('kafka failed to start. details in: ./kafka.log')
kafka_log.close()
return None, None, None
print('Kafka started')
return kafka_proc, zk_proc, kafka_log
def clean_kafka(kafka_proc, zk_proc, kafka_log):
""" stop kafka/zookeeper """
try:
kafka_log.close()
print('Shutdown Kafka...')
kafka_proc.terminate()
time.sleep(5)
if kafka_proc.poll() is None:
print('Failed to shutdown Kafka... killing')
kafka_proc.kill()
print('Shutdown zookeeper...')
zk_proc.terminate()
time.sleep(5)
if zk_proc.poll() is None:
print('Failed to shutdown zookeeper... killing')
zk_proc.kill()
    except Exception:
log.info('kafka/zookeeper already terminated')
def init_env(require_ps=True):
"""initialize the environment"""
if require_ps:
check_ps_configured()
realm = get_realm()
zonegroup = realm.master_zonegroup()
zonegroup_conns = ZonegroupConns(zonegroup)
zonegroup_meta_checkpoint(zonegroup)
ps_zones = []
zones = []
for conn in zonegroup_conns.zones:
if is_ps_zone(conn):
zone_meta_checkpoint(conn.zone)
ps_zones.append(conn)
elif not conn.zone.is_read_only():
zones.append(conn)
assert_not_equal(len(zones), 0)
if require_ps:
assert_not_equal(len(ps_zones), 0)
return zones, ps_zones
def get_ip():
""" This method returns the "primary" IP on the local box (the one with a default route)
source: https://stackoverflow.com/a/28950776/711085
this is needed because on the teuthology machines: socket.getfqdn()/socket.gethostname() return 127.0.0.1 """
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# address should not be reachable
s.connect(('10.255.255.255', 1))
ip = s.getsockname()[0]
finally:
s.close()
return ip
TOPIC_SUFFIX = "_topic"
SUB_SUFFIX = "_sub"
NOTIFICATION_SUFFIX = "_notif"
##############
# pubsub tests
##############
def test_ps_info():
""" log information for manual testing """
return SkipTest("only used in manual testing")
zones, ps_zones = init_env()
realm = get_realm()
zonegroup = realm.master_zonegroup()
bucket_name = gen_bucket_name()
# create bucket on the first of the rados zones
bucket = zones[0].create_bucket(bucket_name)
# create objects in the bucket
number_of_objects = 10
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
print('Zonegroup: ' + zonegroup.name)
print('user: ' + get_user())
print('tenant: ' + get_tenant())
print('Master Zone')
print_connection_info(zones[0].conn)
print('PubSub Zone')
print_connection_info(ps_zones[0].conn)
print('Bucket: ' + bucket_name)
def test_ps_s3_notification_low_level():
""" test low level implementation of s3 notifications """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
# create bucket on the first of the rados zones
zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create topic
topic_name = bucket_name + TOPIC_SUFFIX
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
result, status = topic_conf.set_config()
assert_equal(status/100, 2)
parsed_result = json.loads(result)
topic_arn = parsed_result['arn']
# create s3 notification
notification_name = bucket_name + NOTIFICATION_SUFFIX
generated_topic_name = notification_name+'_'+topic_name
topic_conf_list = [{'Id': notification_name,
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*']
}]
s3_notification_conf = PSNotificationS3(ps_zones[0].conn, bucket_name, topic_conf_list)
_, status = s3_notification_conf.set_config()
assert_equal(status/100, 2)
zone_meta_checkpoint(ps_zones[0].zone)
# get auto-generated topic
generated_topic_conf = PSTopic(ps_zones[0].conn, generated_topic_name)
result, status = generated_topic_conf.get_config()
parsed_result = json.loads(result)
assert_equal(status/100, 2)
assert_equal(parsed_result['topic']['name'], generated_topic_name)
# get auto-generated notification
notification_conf = PSNotification(ps_zones[0].conn, bucket_name,
generated_topic_name)
result, status = notification_conf.get_config()
parsed_result = json.loads(result)
assert_equal(status/100, 2)
assert_equal(len(parsed_result['topics']), 1)
# get auto-generated subscription
sub_conf = PSSubscription(ps_zones[0].conn, notification_name,
generated_topic_name)
result, status = sub_conf.get_config()
parsed_result = json.loads(result)
assert_equal(status/100, 2)
assert_equal(parsed_result['topic'], generated_topic_name)
# delete s3 notification
_, status = s3_notification_conf.del_config(notification=notification_name)
assert_equal(status/100, 2)
# delete topic
_, status = topic_conf.del_config()
assert_equal(status/100, 2)
# verify low-level cleanup
_, status = generated_topic_conf.get_config()
assert_equal(status, 404)
result, status = notification_conf.get_config()
parsed_result = json.loads(result)
assert_equal(len(parsed_result['topics']), 0)
# TODO should return 404
# assert_equal(status, 404)
result, status = sub_conf.get_config()
parsed_result = json.loads(result)
assert_equal(parsed_result['topic'], '')
# TODO should return 404
# assert_equal(status, 404)
# cleanup
topic_conf.del_config()
# delete the bucket
zones[0].delete_bucket(bucket_name)
def test_ps_s3_notification_records():
""" test s3 records fetching """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
# create bucket on the first of the rados zones
bucket = zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
# create topic
topic_name = bucket_name + TOPIC_SUFFIX
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
result, status = topic_conf.set_config()
assert_equal(status/100, 2)
parsed_result = json.loads(result)
topic_arn = parsed_result['arn']
# create s3 notification
notification_name = bucket_name + NOTIFICATION_SUFFIX
topic_conf_list = [{'Id': notification_name,
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*']
}]
s3_notification_conf = PSNotificationS3(ps_zones[0].conn, bucket_name, topic_conf_list)
_, status = s3_notification_conf.set_config()
assert_equal(status/100, 2)
zone_meta_checkpoint(ps_zones[0].zone)
# get auto-generated subscription
sub_conf = PSSubscription(ps_zones[0].conn, notification_name,
topic_name)
_, status = sub_conf.get_config()
assert_equal(status/100, 2)
# create objects in the bucket
number_of_objects = 10
for i in range(number_of_objects):
key = bucket.new_key(str(i))
key.set_contents_from_string('bar')
# wait for sync
zone_bucket_checkpoint(ps_zones[0].zone, zones[0].zone, bucket_name)
# get the events from the subscription
result, _ = sub_conf.get_events()
records = json.loads(result)
for record in records['Records']:
log.debug(record)
keys = list(bucket.list())
# TODO: use exact match
verify_s3_records_by_elements(records, keys, exact_match=False)
# cleanup
_, status = s3_notification_conf.del_config()
topic_conf.del_config()
# delete the keys
for key in bucket.list():
key.delete()
zones[0].delete_bucket(bucket_name)
def test_ps_s3_notification():
""" test s3 notification set/get/delete """
zones, ps_zones = init_env()
bucket_name = gen_bucket_name()
# create bucket on the first of the rados zones
zones[0].create_bucket(bucket_name)
# wait for sync
zone_meta_checkpoint(ps_zones[0].zone)
topic_name = bucket_name + TOPIC_SUFFIX
# create topic
topic_name = bucket_name + TOPIC_SUFFIX
topic_conf = PSTopic(ps_zones[0].conn, topic_name)
response, status = topic_conf.set_config()
assert_equal(status/100, 2)
parsed_result = json.loads(response)
topic_arn = parsed_result['arn']
# create one s3 notification
notification_name1 = bucket_name + NOTIFICATION_SUFFIX + '_1'
topic_conf_list = [{'Id': notification_name1,
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*']
}]
s3_notification_conf1 = PSNotificationS3(ps_zones[0].conn, bucket_name, topic_conf_list)
response, status = s3_notification_conf1.set_config()
assert_equal(status/100, 2)
# create another s3 notification with the same topic
notification_name2 = bucket_name + NOTIFICATION_SUFFIX + '_2'
topic_conf_list = [{'Id': notification_name2,
'TopicArn': topic_arn,
'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*']
}]
s3_notification_conf2 = PSNotificationS3(ps_zones[0].conn, bucket_name, topic_conf_list)
response, status = s3_notification_conf2.set_config()
assert_equal(status/100, 2)
zone_meta_checkpoint(ps_zones[0].zone)
# get all notification on a bucket
response, status = s3_notification_conf1.get_config()
assert_equal(status/100, 2)
assert_equal(len(response['TopicConfigurations']), 2)
assert_equal(response['TopicConfigurations'][0]['TopicArn'], topic_arn)
assert_equal(response['TopicConfigurations'][1]['TopicArn'], topic_arn)
# get specific notification on a bucket
response, status = s3_notification_conf1.get_config(notification=notification_name1)
assert_equal(status/100, 2)
assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn)
assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Id'], notification_name1)
response, status = | |
#!/usr/bin/env python
# repo: fahlmant/openshift-tools
''' Interact with Trello API as a CLI or Sopel IRC bot module '''
import argparse
from datetime import date
from email.mime.text import MIMEText
import json
import os
import re
import smtplib
import sys
# sopel is only for IRC bot installation. Not required for CLI
try:
import sopel.module # pylint: disable=import-error
except ImportError:
pass
try:
from urllib import urlencode
from urllib2 import HTTPError, Request, urlopen
except ImportError:
from urllib.error import HTTPError
from urllib.parse import urlencode
from urllib.request import Request, urlopen
# constants
DEFAULT_LIST = "Active"
DEFAULT_SNOWFLAKE_LIST = "Snowflakes"
DEFAULT_RESOLVED_LIST = "Resolved"
BASE_URL = "https://api.trello.com/1"
EMAIL_SERVER = 'smtp.redhat.com'
EMAIL_FROM = '<EMAIL>'
EMAIL_REPLYTO = '<EMAIL>'
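# Configuration is read from the environment (see Trello.__init__ and get_trello_id below):
#   trello_consumer_key / trello_oauth_token   - Trello API credentials
#   trello_board_id / trello_board_id_long     - short and long IDs of the target board
#   trello_report_email_addresses              - optional default recipients for the report command
#   IRCNICK_<nick>                             - maps an IRC nick to a Trello username (bot commands)
# Illustrative CLI invocations (script name assumed):
#   trello.py get --list Active
#   trello.py create "Some issue title"
#   trello.py update <card-short-url> --comment "note" --move Resolved
#   trello.py report --email sre@example.com --move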
class Trello(object):
"""Trello object"""
def __init__(self):
"""Set object params"""
self.args = None
self.api_key = os.environ.get("trello_consumer_key", None)
self.oauth_token = os.environ.get("trello_oauth_token", None)
self.board_id = os.environ.get("trello_board_id", None)
self.board_id_long = os.environ.get("trello_board_id_long", None)
self.email_addresses = os.environ.get("trello_report_email_addresses", None)
@staticmethod
def parse_args():
"""Parse CLI arguments"""
parser = argparse.ArgumentParser(
            description='Create, comment on, and move Trello cards, and generate reports.')
subparsers = parser.add_subparsers(help='sub-command help')
parser_get = subparsers.add_parser('get',
help="""Get board information:
card list, user list or
card details""")
parser_get.add_argument(
'card',
nargs='?',
metavar='CARD_URL',
help='Card short URL to get details for')
parser_get.add_argument(
'--list', '-l',
metavar='TRELLO_LIST',
default=DEFAULT_LIST,
help='List to display cards from, e.g. "Resolved" or "Snowflakes"')
parser_get.add_argument(
'--users', '-u',
action='store_true',
help='Display board users')
parser_get.set_defaults(action='get')
parser_create = subparsers.add_parser('create',
help='Create a new card')
parser_create.set_defaults(action='create')
parser_create.add_argument(
'title', metavar='TITLE',
help='Card title')
parser_update = subparsers.add_parser('update',
help='Update an existing card')
parser_update.set_defaults(action='update')
parser_update.add_argument(
'card', metavar='CARD', help='Existing card URL')
parser_update.add_argument(
'--comment', '-c',
help='Add a comment')
parser_update.add_argument(
'--move', '-m',
metavar='LIST_NAME',
help='Move card to another list, e.g. "Resolved" or "Snowflakes"')
parser_update.add_argument(
'--assign', '-a',
metavar='USER',
help='Attach Trello user to card')
parser_update.add_argument(
'--unassign', '-u',
metavar='USER',
help='Remove Trello user from card')
parser_report = subparsers.add_parser('report',
help="Generate reports")
parser_report.set_defaults(action='report')
parser_report.add_argument(
'--email', '-e',
metavar='ADDRESS[,ADDRESS]',
help="""Comma-separated (no spaces) list of email addresses to
send report to. Overrides env var 'trello_report_email_addresses'""")
parser_report.add_argument(
'--move', '-m',
action='store_true',
help='Move cards to end-of-week list')
return parser.parse_args()
def create(self, title=None):
"""Create card"""
if not title:
title = self.args.title
card = self.trello_create(title)
return card['shortUrl']
def update(self):
"""Update card"""
if self.trello_update(self.card_id(self.args.card)):
print("Updated")
else:
sys.exit("No updates applied")
def get(self):
"""Get card details or list of cards"""
cards = None
if self.args.card:
return json.dumps(self.trello_get(self.card_id()), indent=4)
cards = self.trello_get()
results = ''
for card in cards:
members = ''
if self.args.users:
results += '{} {}\n'.format(str(card['username']),
str(card['fullName']))
else:
for member in card['idMembers']:
if member:
members += str(self.member_username(member)) + ' '
results += '{} {} ({})\n'.format(str(card['shortUrl']),
str(card['name']),
members)
return results
def report(self, email=None, move=False):
"""Generate reports"""
payload = self.report_payload()
print(payload)
_env_email = os.environ.get("trello_report_email_addresses", None)
if _env_email:
email = _env_email
if self.args:
if self.args.email:
email = self.args.email
if self.args.move:
move = self.args.move
week_number = date.today().isocalendar()[1]
if email:
subj = "OpenShift SRE Report for Week #{} (ending {})".format(
week_number, date.today().strftime("%d-%m-%Y"))
_board_url = "https://trello.com/b/{}".format(
os.environ.get("trello_board_id", None))
payload = "For more information visit the SRE 24x7 board {}\n\n{}".format(
_board_url, payload)
self.email_report(email, subj, payload)
print("Report emailed to {}".format(email))
if move:
list_name = "Week #{}".format(week_number)
if self.move_cards(to_list=list_name):
print("Cards moved to list '{}'".format(list_name))
def report_payload(self):
"""Return report payload
:return: formatted report"""
data = ""
resolved_cards = self.get_list_cards(DEFAULT_RESOLVED_LIST)
data += "{}: {}\n".format(DEFAULT_LIST,
len(self.get_list_cards(DEFAULT_LIST)))
data += "{}: {}\n".format(DEFAULT_SNOWFLAKE_LIST,
len(self.get_list_cards(DEFAULT_SNOWFLAKE_LIST)))
data += "{}: {}\n".format(DEFAULT_RESOLVED_LIST,
len(resolved_cards))
data += "\n---\nResolved issues:\n---\n"
for card in resolved_cards:
data += "{} {}\n".format(card['shortUrl'], card['name'])
return data
def move_cards(self, to_list, from_list=None):
"""Move cards from one list to another
:param to_list (required): name of list to move cards to
:param from_list (optional, use default): name of list to move card from
:return: None"""
params = {}
if not to_list:
print("Cannot move: no destination list provided")
return False
if not from_list:
from_list = DEFAULT_RESOLVED_LIST
to_list_id = self.create_list(to_list)
path = "/lists/" + self.get_list_id(from_list) + "/moveAllCards"
params['idBoard'] = self.board_id_long
params['idList'] = to_list_id
return self.make_request(path, 'POST', params)
def create_list(self, name=None):
"""Create new list
:param name: name of list
:return: list ID"""
params = {}
params['name'] = name
params['idBoard'] = self.board_id_long
params['pos'] = "bottom"
newlist = self.make_request('/lists', 'POST', params)
return newlist['id']
@staticmethod
def email_report(email, subj, body):
"""Email report
:param email: email address
:param subj: email subject
:param body: email body
:return: None"""
msg = MIMEText(body)
msg['Subject'] = subj
msg['From'] = EMAIL_FROM
msg['To'] = email
msg['Reply-to'] = EMAIL_REPLYTO
smtpcxn = smtplib.SMTP(host=EMAIL_SERVER, port='25')
smtpcxn.sendmail(email, email, msg.as_string())
smtpcxn.quit()
def get_list_cards(self, trello_list=DEFAULT_LIST):
"""Return card total for given list
:param trello_list: list name
:return: cards array"""
path = "/lists/%s/cards" % self.get_list_id(trello_list)
return self.make_request(path)
def trello_update(self, card_id, **kwargs):
"""Call trello update API
:param card_id: card ID
:return: success boolean"""
params = {}
path = None
updated = False
# handle being called via CLI or bot
if self.args:
if self.args.comment:
kwargs['comment'] = self.args.comment
if self.args.move:
kwargs['move'] = self.args.move
if self.args.assign:
kwargs['assign'] = self.args.assign
if self.args.unassign:
kwargs['unassign'] = self.args.unassign
# Since the trello API is different calls/methods for different data
# we call multiple times
if 'comment' in kwargs:
params['text'] = kwargs['comment']
path = '/cards/' + card_id + '/actions/comments'
updated = self.make_request(path, "POST", params)
if 'resolve' in kwargs:
params['idList'] = self.get_list_id(DEFAULT_RESOLVED_LIST)
path = '/cards/' + card_id
updated = self.make_request(path, "PUT", params)
if 'move' in kwargs:
params['idList'] = self.get_list_id(kwargs['move'])
path = '/cards/' + card_id
updated = self.make_request(path, "PUT", params)
if 'assign' in kwargs:
params['value'] = self.member_id(kwargs['assign'])
path = '/cards/' + card_id + '/idMembers'
updated = self.make_request(path, "POST", params)
if 'unassign' in kwargs:
path = '/cards/' + card_id + '/idMembers/' + self.member_id(kwargs['unassign'])
updated = self.make_request(path, "DELETE", params)
return updated
def trello_create(self, title):
"""Call trello create API
:param title: name/title of card
:return: card"""
params = {}
params['idList'] = self.get_list_id()
params['name'] = title
path = '/cards'
return self.make_request(path, "POST", params)
def member_username(self, memberid):
"""Get member username from member ID"""
member = self.make_request('/members/' + memberid)
return member['username']
def member_id(self, username):
"""Get member id from username"""
members = self.make_request('/boards/' + self.board_id + '/members/')
for member in members:
if username == member['username']:
return member['id']
def card_id(self, url=None):
"""Return parsed card ID from URL
example: https://trello.com/c/PZlOHgGm
returns: PZlOHgGm
:param url: trello short URL
:return: trello card ID"""
if not url:
url = self.args.card
parsed_uri = url.split("/")
return parsed_uri[-1]
def get_list_id(self, list_id=None):
"""Return the list ID
:param list_id: list ID if not default
:return: list_id"""
default = DEFAULT_LIST
if list_id:
default = list_id
path = '/boards/' + self.board_id + '/lists/'
lists = self.make_request(path)
# match board name regardless of case
pattern = re.compile(default, re.I)
for board_list in lists:
if re.match(pattern, board_list['name']):
return board_list['id']
sys.exit("List '%s' not found" % list_id)
def trello_get(self, card_id=None):
"""Get trello cards
:param card_id: trello card ID
:return: trello json"""
path = None
if card_id:
path = '/cards/' + card_id
elif self.args.users:
path = '/boards/' + self.board_id + '/members'
else:
path = '/lists/' + self.get_list_id(self.args.list) + '/cards'
results = self.make_request(path)
return results
def make_request(self, path, method="GET", params=None):
"""Trello API call
:param path: trello API path
:param method: rest call method
:param params: API params
:return: trello json"""
if not params:
params = {}
params['key'] = self.api_key
params['token'] = self.oauth_token
url = BASE_URL + path
data = None
if method == "GET":
url += '?' + urlencode(params)
elif method in ['DELETE', 'POST', 'PUT']:
data = urlencode(params).encode('utf-8')
request = Request(url)
if method in ['DELETE', 'PUT']:
request.get_method = lambda: method
try:
if data:
response = urlopen(request, data=data)
else:
response = urlopen(request)
except HTTPError as err:
print(err)
print(err.read())
result = None
else:
result = json.loads(response.read().decode('utf-8'))
return result
def get_trello_id(ircnick):
"""Return trello ID for a given IRC nick"""
key = 'IRCNICK_' + ircnick
try:
return os.environ[key]
except KeyError:
print("%s, you need to map your IRC nick with Trello username" % ircnick)
return None
@sopel.module.commands('issue')
def issue(bot, trigger):
"""Record a new issue in Trello, e.g. '.issue Some issue text'"""
trellobot = Trello()
card = trellobot.trello_create(trigger.group(2))
bot.say(card['shortUrl'])
if not trellobot.trello_update(trellobot.card_id(card['shortUrl']),
assign=get_trello_id(trigger.nick)):
bot.reply(
"you need to map your IRC nick with Trello username." +
"See https://github.com/openshift/openshift-ansible-ops/tree/prod/playbooks/adhoc/ircbot")
@sopel.module.commands('comment')
def comment(bot, trigger):
"""Add comment to a trello card, e.g. '.comment <trelloShortUrl> My comment'"""
trellobot = Trello()
msg = trigger.group(2).partition(' ')
trellobot.trello_update(trellobot.card_id(msg[0]), comment=msg[2])
bot.say('Comment added')
@sopel.module.commands('resolve', 'resolved')
def resolve(bot, trigger):
"""Resolve a trello card, e.g. '.resolve <trelloShortUrl>'"""
trellobot = Trello()
if trellobot.trello_update(trellobot.card_id(trigger.group(2)), resolve=True):
card = trellobot.trello_get(trellobot.card_id(trigger.group(2)))
bot.say('Resolved {}: {}'.format(trigger.group(2), card['name']))
else:
bot.say('Could not resolve %s' % trigger.group(2))
def main():
"""
main() function
:return:
"""
trello = Trello()
| |
the bucket where the artifact resides"
)
region: Optional[str] = Field(
None, description="Region contains the optional bucket region"
)
roleARN: Optional[str] = Field(
None,
description="RoleARN is the Amazon Resource Name (ARN) of the role to assume.",
)
secretKeySecret: Optional[v1.SecretKeySelector] = Field(
None,
description="SecretKeySecret is the secret selector to the bucket's secret key",
)
useSDKCreds: Optional[bool] = Field(
None,
description="UseSDKCreds tells the driver to figure out credentials based on sdk defaults.",
)
class SemaphoreRef(BaseModel):
configMapKeyRef: Optional[v1.ConfigMapKeySelector] = Field(
None,
description="ConfigMapKeyRef is configmap selector for Semaphore configuration",
)
class Sequence(BaseModel):
count: Optional[intstr.IntOrString] = Field(
None,
description="Count is number of elements in the sequence (default: 0). Not to be used with end",
)
end: Optional[intstr.IntOrString] = Field(
None,
description="Number at which to end the sequence (default: 0). Not to be used with Count",
)
format: Optional[str] = Field(
None,
description="Format is a printf format string to format the value in the sequence",
)
start: Optional[intstr.IntOrString] = Field(
None, description="Number at which to start the sequence (default: 0)"
)
class SubmitOpts(BaseModel):
annotations: Optional[str] = Field(
None, description="Annotations adds to metadata.labels"
)
dryRun: Optional[bool] = Field(
None,
description="DryRun validates the workflow on the client-side without creating it. This option is not supported in API",
)
entryPoint: Optional[str] = Field(
None, description="Entrypoint overrides spec.entrypoint"
)
generateName: Optional[str] = Field(
None, description="GenerateName overrides metadata.generateName"
)
labels: Optional[str] = Field(None, description="Labels adds to metadata.labels")
name: Optional[str] = Field(None, description="Name overrides metadata.name")
ownerReference: Optional[v1_1.OwnerReference] = Field(
None, description="OwnerReference creates a metadata.ownerReference"
)
parameterFile: Optional[str] = Field(
None,
description="ParameterFile holds a reference to a parameter file. This option is not supported in API",
)
parameters: Optional[List[str]] = Field(
None, description="Parameters passes input parameters to workflow"
)
serverDryRun: Optional[bool] = Field(
None,
description="ServerDryRun validates the workflow on the server-side without creating it",
)
serviceAccount: Optional[str] = Field(
None,
description="ServiceAccount runs all pods in the workflow using specified ServiceAccount.",
)
class Synchronization(BaseModel):
mutex: Optional[Mutex] = Field(
None, description="Mutex holds the Mutex lock details"
)
semaphore: Optional[SemaphoreRef] = Field(
None, description="Semaphore holds the Semaphore configuration"
)
class WorkflowSubmitRequest(BaseModel):
namespace: Optional[str] = None
resourceKind: Optional[str] = None
resourceName: Optional[str] = None
submitOptions: Optional[SubmitOpts] = None
class Artifact(BaseModel):
archive: Optional[ArchiveStrategy] = Field(
None,
description="Archive controls how the artifact will be saved to the artifact repository.",
)
archiveLogs: Optional[bool] = Field(
None,
description="ArchiveLogs indicates if the container logs should be archived",
)
artifactory: Optional[ArtifactoryArtifact] = Field(
None, description="Artifactory contains artifactory artifact location details"
)
from_: Optional[str] = Field(
None,
alias="from",
description="From allows an artifact to reference an artifact from a previous step",
)
gcs: Optional[GCSArtifact] = Field(
None, description="GCS contains GCS artifact location details"
)
git: Optional[GitArtifact] = Field(
None, description="Git contains git artifact location details"
)
globalName: Optional[str] = Field(
None,
description="GlobalName exports an output artifact to the global scope, making it available as '{{io.argoproj.workflow.v1alpha1.outputs.artifacts.XXXX}} and in workflow.status.outputs.artifacts",
)
hdfs: Optional[HDFSArtifact] = Field(
None, description="HDFS contains HDFS artifact location details"
)
http: Optional[HTTPArtifact] = Field(
None, description="HTTP contains HTTP artifact location details"
)
mode: Optional[int] = Field(
None,
description="mode bits to use on this file, must be a value between 0 and 0777 set when loading input artifacts.",
)
name: str = Field(
...,
description="name of the artifact. must be unique within a template's inputs/outputs.",
)
optional: Optional[bool] = Field(
None,
description="Make Artifacts optional, if Artifacts doesn't generate or exist",
)
oss: Optional[OSSArtifact] = Field(
None, description="OSS contains OSS artifact location details"
)
path: Optional[str] = Field(
None, description="Path is the container path to the artifact"
)
raw: Optional[RawArtifact] = Field(
None, description="Raw contains raw artifact location details"
)
recurseMode: Optional[bool] = Field(
None,
description="If mode is set, apply the permission recursively into the artifact if it is a folder",
)
s3: Optional[S3Artifact] = Field(
None, description="S3 contains S3 artifact location details"
)
subPath: Optional[str] = Field(
None,
description="SubPath allows an artifact to be sourced from a subpath within the specified source",
)
class ArtifactLocation(BaseModel):
archiveLogs: Optional[bool] = Field(
None,
description="ArchiveLogs indicates if the container logs should be archived",
)
artifactory: Optional[ArtifactoryArtifact] = Field(
None, description="Artifactory contains artifactory artifact location details"
)
gcs: Optional[GCSArtifact] = Field(
None, description="GCS contains GCS artifact location details"
)
git: Optional[GitArtifact] = Field(
None, description="Git contains git artifact location details"
)
hdfs: Optional[HDFSArtifact] = Field(
None, description="HDFS contains HDFS artifact location details"
)
http: Optional[HTTPArtifact] = Field(
None, description="HTTP contains HTTP artifact location details"
)
oss: Optional[OSSArtifact] = Field(
None, description="OSS contains OSS artifact location details"
)
raw: Optional[RawArtifact] = Field(
None, description="Raw contains raw artifact location details"
)
s3: Optional[S3Artifact] = Field(
None, description="S3 contains S3 artifact location details"
)
class Inputs(BaseModel):
artifacts: Optional[List[Artifact]] = Field(
None, description="Artifact are a list of artifacts passed as inputs"
)
parameters: Optional[List[Parameter]] = Field(
None, description="Parameters are a list of parameters passed as inputs"
)
class Outputs(BaseModel):
artifacts: Optional[List[Artifact]] = Field(
None,
description="Artifacts holds the list of output artifacts produced by a step",
)
exitCode: Optional[str] = Field(
None, description="ExitCode holds the exit code of a script template"
)
parameters: Optional[List[Parameter]] = Field(
None,
description="Parameters holds the list of output parameters produced by a step",
)
result: Optional[str] = Field(
None, description="Result holds the result (stdout) of a script template"
)
class PodGC(BaseModel):
labelSelector: Optional[v1_1.LabelSelector] = Field(
None,
description="LabelSelector is the label selector to check if the pods match the labels before being added to the pod GC queue.",
)
strategy: Optional[str] = Field(
None,
description='Strategy is the strategy to use. One of "OnPodCompletion", "OnPodSuccess", "OnWorkflowCompletion", "OnWorkflowSuccess"',
)
class Arguments(BaseModel):
artifacts: Optional[List[Artifact]] = Field(
None,
description="Artifacts is the list of artifacts to pass to the template or workflow",
)
parameters: Optional[List[Parameter]] = Field(
None,
description="Parameters is the list of parameters to pass to the template or workflow",
)
class DAGTask(BaseModel):
arguments: Optional[Arguments] = Field(
None,
description="Arguments are the parameter and artifact arguments to the template",
)
continueOn: Optional[ContinueOn] = Field(
None,
description="ContinueOn makes argo to proceed with the following step even if this step fails. Errors and Failed states can be specified",
)
dependencies: Optional[List[str]] = Field(
None, description="Dependencies are name of other targets which this depends on"
)
depends: Optional[str] = Field(
None, description="Depends are name of other targets which this depends on"
)
name: str = Field(..., description="Name is the name of the target")
onExit: Optional[str] = Field(
None,
description="OnExit is a template reference which is invoked at the end of the template, irrespective of the success, failure, or error of the primary template.",
)
template: Optional[str] = Field(None, description="Name of template to execute")
templateRef: Optional[TemplateRef] = Field(
None,
description="TemplateRef is the reference to the template resource to execute.",
)
when: Optional[str] = Field(
None,
description="When is an expression in which the task should conditionally execute",
)
withItems: Optional[List[Item]] = Field(
None,
description="WithItems expands a task into multiple parallel tasks from the items in the list",
)
withParam: Optional[str] = Field(
None,
description="WithParam expands a task into multiple parallel tasks from the value in the parameter, which is expected to be a JSON list.",
)
withSequence: Optional[Sequence] = Field(
None, description="WithSequence expands a task into a numeric sequence"
)
class DAGTemplate(BaseModel):
failFast: Optional[bool] = Field(
None,
description='This flag is for DAG logic. The DAG logic has a built-in "fail fast" feature to stop scheduling new steps, as soon as it detects that one of the DAG nodes is failed. Then it waits until all DAG nodes are completed before failing the DAG itself. The FailFast flag default is true, if set to false, it will allow a DAG to run all branches of the DAG to completion (either success or failure), regardless of the failed outcomes of branches in the DAG. More info and example about this feature at https://github.com/argoproj/argo_workflows/issues/1442',
)
target: Optional[str] = Field(
None, description="Target are one or more names of targets to execute in a DAG"
)
tasks: List[DAGTask] = Field(..., description="Tasks are a list of DAG tasks")
class NodeStatus(BaseModel):
boundaryID: | |
self.pupil = np.zeros([self.m_crop, self.n_crop], dtype=np.complex128)
# Keep track of which iteration we are on
self.current_itr = 0
# Store frame_state_list
self.frame_state_list = dataset.frame_state_list
# Multiplexing parameters
# self.multiplexing = True
# if self.multiplexing:
# self.numlits = np.ones(np.size(self.n_frames))
# crop_size = (self.n_frames, max(self.numlits), self.m_crop, self.n_crop)
# else:
# self.numlits = np.zeros(self.n_frames, dtype=int)
# for frame_index in range(self.n_frames):
# self.numlits[frame_index] = 1
# crop_size = (self.n_frames, 1, self.m_crop, self.n_crop)
# Convert source_list_na to cropx and cropy
# print(self.source_list_na)
self.na2crop()
# Determine number of channels
max_length = 0
# Check that crops are within reconstruction size
assert np.max(self.cropxend) < self.N, "cropxend (%d) is > N (%d)" % (np.max(self.cropxend), self.N)
assert np.max(self.cropyend) < self.M, "cropyend (%d) is > M (%d)" % (np.max(self.cropyend), self.M)
assert np.min(self.cropxstart) >= 0, "cropxstart (%d) is < 0" % (np.min(self.cropxstart))
assert np.min(self.cropystart) >= 0, "cropystart (%d) is < 0" % (np.min(self.cropystart))
# Make sure extrapolated points from Regina's method are fit to a homographic linear transformation
if self.options.led_auto_calib_use_pre_rigid:
if not self.options.quiet:
print("Performing rigid fit of pre-calibration...")
self.fitLedNaToRigidTransform(global_transformation=True)
# Add perturbation to led positions if user indicates
if self.options.led_auto_calib_add_error_na > 0:
if not self.options.quiet:
print("Adding random perturbations with magnitude %.2f NA to LED positions." % options.led_auto_calib_add_error_na)
na_perturb = self.options.led_auto_calib_add_error_na * \
(np.random.rand(self.source_list_na.shape[0], self.source_list_na.shape[1]) - 0.5)
self.source_list_na = self.source_list_na.copy() + na_perturb
self.na2crop()
        # TODO: figure out why this would be necessary -- it basically inverts what na2crop does, but na2crop keeps
        # the original source_list_na intact, so it is unclear why this function would be needed.
# Update source_list_na
# self.source_list_na = self.crop2na()
# Store initial source points
# self.source_list_na_init = self.crop2na()
# Generate a mask for brightfield images
self.brightfield_mask = np.squeeze(np.sqrt(self.source_list_na[:, 0] ** 2 + self.source_list_na[:, 1] ** 2) < dataset.metadata.objective.na)
# Create grid in Fourier domain
fy = np.arange(-self.m_crop/2, self.m_crop/2) / (self.eff_pixel_size * self.m_crop)
fx = np.arange(-self.n_crop/2, self.n_crop/2) / (self.eff_pixel_size * self.n_crop)
[fxx, fyy] = np.meshgrid(fx, fy)
# Pupil initialization
r = (fxx ** 2 + fyy ** 2) ** 0.5
if self.dataset.metadata.camera.is_color:
self.pupil = []
for wavelength in self.wavelength_list:
self.pupil.append(r < (dataset.metadata.objective.na) / wavelength)
self.pupil = np.asarray(self.pupil)
else:
self.pupil = (r < (dataset.metadata.objective.na) / self.wavelength_list[0]).astype(np.complex128)
# Object initialization
if self.dataset.metadata.camera.is_color:
self.objf = np.zeros([self.M, self.N, 3], dtype=np.complex128)
else:
self.objf = np.zeros([self.M, self.N], dtype=np.complex128)
if self.options.obj_init:
self.objf[np.floor((self.M - self.m_crop) / 2).astype(np.int):np.floor((self.M + self.m_crop)/2).astype(np.int),
np.floor((self.N - self.n_crop) / 2).astype(np.int):np.floor((self.N + self.n_crop)/2).astype(np.int)] = self.F(dataset.obj_init)
else:
self.objf[np.floor((self.M - self.m_crop) / 2).astype(np.int):np.floor((self.M + self.m_crop)/2).astype(np.int),
np.floor((self.N - self.n_crop) / 2).astype(np.int):np.floor((self.N + self.n_crop)/2).astype(np.int)] += self.F(np.sqrt(self.frame_list[0, :, :])) * self.pupil
self.obj = self.iF(self.objf) / (self.scale ** 2)
def F(self, x):
"""
Forward Fourier transform operator
"""
if np.array_equal([self.m_crop, self.n_crop], x.shape):
self._plan_small_f.input_array[:] = np.fft.ifftshift(x)
return np.fft.fftshift(self._plan_small_f()).copy()
elif np.array_equal([self.M, self.N], x.shape):
self._plan_large_f.input_array[:] = np.fft.ifftshift(x)
return np.fft.fftshift(self._plan_large_f()).copy()
else:
raise ValueError("FFT size did not match n_crop or N!")
def iF(self, x):
"""
Inverse Fourier transform operator
"""
if np.array_equal([self.m_crop, self.n_crop], x.shape):
self._plan_small_b.input_array[:] = np.fft.ifftshift(x)
return np.fft.fftshift(self._plan_small_b()).copy()
elif np.array_equal([self.M, self.N], x.shape):
self._plan_large_b.input_array[:] = np.fft.ifftshift(x)
return np.fft.fftshift(self._plan_large_b()).copy()
else:
raise ValueError("FFT size did not match n_crop or N!")
def crop2na(self):
'''
Function to convert current LED kx/ky cropping coordinates to NA
'''
        source_list_na_up = np.zeros(self.source_list_na.shape)
        # cropxstart/cropystart are either 1D (one LED per frame) or 2D
        # (multiplexed LEDs); both cases are handled with vectorized expressions.
        if np.ndim(self.cropxstart) == 1:
            source_list_na_up[:, 0] = (self.cropxstart + self.n_crop / 2 - self.N / 2) * self.wavelength_list / (self.recon_pixel_size * self.N)
            source_list_na_up[:, 1] = (self.cropystart + self.m_crop / 2 - self.M / 2) * self.wavelength_list / (self.recon_pixel_size * self.M)
        else:
            source_list_na_up[:, 0] = (self.cropxstart[:, 0] + self.n_crop / 2 - self.N / 2) * self.wavelength_list / (self.recon_pixel_size * self.N)
            source_list_na_up[:, 1] = (self.cropystart[:, 0] + self.m_crop / 2 - self.M / 2) * self.wavelength_list / (self.recon_pixel_size * self.M)
        return source_list_na_up
def na2crop(self):
'''
Function to convert current NA to kx/ky crop coordinates
'''
pupilshifty = []
pupilshiftx = []
for frame_index in range(self.n_frames):
frame_state = self.frame_state_list[frame_index]
frame_pupilshiftx = []
frame_pupilshifty = []
for led_index in range(len(frame_state['illumination']['sequence'])):
led_pupilshiftx = [0, 0, 0]
led_pupilshifty = [0, 0, 0]
for color_index, color_str in enumerate(self.color_list):
# TODO Make this work for flickering LEDs
val = frame_state['illumination']['sequence'][led_index][0]['value'][color_str] / ((2 ** self.dataset.metadata.illumination.bit_depth) - 1)
led_number = frame_state['illumination']['sequence'][led_index][0]['index']
na_x = self.source_list_na[led_number][0]
na_y = self.source_list_na[led_number][1]
led_pupilshiftx[color_index] = np.round(na_x / self.wavelength_list[color_index] * self.recon_pixel_size * self.N)
led_pupilshifty[color_index] = np.round(na_y / self.wavelength_list[color_index] * self.recon_pixel_size * self.M)
# Add this led to frame pupilshift
frame_pupilshiftx.append(led_pupilshiftx)
frame_pupilshifty.append(led_pupilshifty)
# Update global pupilshiftx
pupilshiftx.append(frame_pupilshiftx)
pupilshifty.append(frame_pupilshifty)
# Cropping index in Fourier domain
self.cropystart = (self.M / 2 + pupilshifty - self.m_crop / 2).astype(int)
self.cropyend = (self.M / 2 + pupilshifty + self.m_crop / 2).astype(int)
self.cropxstart = (self.N / 2 + pupilshiftx - self.n_crop / 2).astype(int)
self.cropxend = (self.N / 2 + pupilshiftx + self.n_crop / 2).astype(int)
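        # Worked example (illustrative numbers): for NA_x = 0.2, wavelength 0.5 um,
        # reconstruction pixel size 0.1 um and N = 2560, the pupil shift is
        # round(0.2 / 0.5 * 0.1 * 2560) = round(102.4) = 102 pixels, so the crop
        # window is centred 102 pixels away from the Fourier-plane centre along kx.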
    # Function for finding average rigid transform for LED positions
def fitLedNaToRigidTransform(self, leds_to_process=-1, boards_to_process=-1, global_transformation=False,
write_results=True, mode=""):
if type(leds_to_process) is not list:
leds_to_process = [leds_to_process] # Convert to list
if type(boards_to_process) is not list:
boards_to_process = [boards_to_process] # Convert to list
if leds_to_process[0] == -1 and len(leds_to_process) == 1:
leds_to_process = np.arange(self.n_frames)
if boards_to_process[0] == -1 and len(boards_to_process) == 1:
boards_to_process = range(np.min(self.source_list_board_idx.astype(np.int)), np.max(self.source_list_board_idx.astype(np.int)) + 1)
if not global_transformation:
board_map = self.source_list_board_idx
else:
board_map = np.zeros(self.source_list_board_idx.shape, dtype=np.bool)
boards_to_process = [0]
        if mode == "":
mode = self.options.led_auto_calib_rigid_trans_type
self.source_list_na = self.crop2na()
source_list_na_local = self.source_list_na.copy()
# Loop over all boards
for board_idx in boards_to_process:
# Define a list of points in ideal orientation
mask_led = np.zeros(self.source_list_board_idx.shape, dtype=np.bool)
mask_board = np.zeros(self.source_list_board_idx.shape, dtype=np.bool)
mask_led[leds_to_process] = True
mask_board[board_map == board_idx] = True
mask = mask_board & mask_led
if np.sum(mask) > 8: # we need the problem to be well-posed
# The two lists of points we want to update
na_pos_design = self.source_list_na_design[mask, :]
na_pos_updated = self.source_list_na[mask, :]
pad = lambda x: np.hstack([x, np.ones((x.shape[0], 1))])
unpad = lambda x: x[:, :-1]
Y = pad(na_pos_design)
X = pad(na_pos_updated)
if mode == 'lstsq':
H, res, rank, s = np.linalg.lstsq(Y, X) # ax=b (a,b)
transform = lambda x: unpad(np.dot(pad(x), H))
source_list_na_local[mask, :] = transform(na_pos_design)
elif mode == 'homog':
fp = ransac.make_homog(na_pos_design.T)
tp = ransac.make_homog(na_pos_updated.T)
H = ransac.H_from_points(fp, tp)
source_list_na_local[mask, :] = (ransac.normalize(H.dot(fp)).T[:, 0:2])
elif mode == 'affine':
fp = ransac.make_homog(na_pos_design.T)
tp = ransac.make_homog(na_pos_updated.T)
H = ransac.Haffine_from_points(fp, tp)
source_list_na_local[mask, :] = (ransac.normalize(H.dot(fp)).T[:, 0:2])
elif mode == 'ransac':
fp = ransac.make_homog(na_pos_design.T)
tp = ransac.make_homog(na_pos_updated.T)
my_model = ransac.RansacModel()
H = ransac.H_from_ransac(fp, tp, my_model, maxiter=100, match_theshold=20, n_close=20)[0]
source_list_na_local[mask, :] = (ransac.normalize(H.dot(fp)).T[:, 0:2])
if write_results:
self.source_list_na = source_list_na_local.copy()
self.na2crop()
return source_list_na_local.copy()
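    # Note on the 'lstsq' mode above: padding each 2-D NA point with a constant 1
    # turns the fit into an ordinary least-squares problem Y @ H ~= X in homogeneous
    # coordinates, so np.linalg.lstsq directly recovers the transform that maps the
    # designed LED positions onto the calibrated ones.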
def findLedNaError(self, leds_to_process=-1, scan_range=-1, radial_penalty=-1, write_results=True, grad_iter=-1, scan_mode=""):
# Determine LEDs to run
if type(leds_to_process) is not list:
leds_to_process = [leds_to_process] # Convert to list
# If we pass -1, process all leds.
if leds_to_process[0] < 0:
leds_to_process = np.arange(self.n_frames)
# Use default scan range if we don't override
if scan_range < 0:
scan_range = self.options.led_auto_calib_scan_range
scan_range_total = 2 * scan_range + 1
# Use default grad iteration if we don't override
if grad_iter < 0:
grad_iter = self.options.led_auto_calib_scan_range
if radial_penalty < 0:
radial_penalty = self.options.led_auto_calib_rad_pen
if scan_mode == "":
scan_mode = self.options.led_auto_calib_scan_mode
dc_vals = np.zeros((scan_range_total, scan_range_total))
for img_idx in leds_to_process:
# Determine if we're going to process this LED or not, based on settings and the NA of this LED
is_brightfield = self.brightfield_mask[img_idx]
dc_vals_list = []
if (is_brightfield & ((scan_mode == "all") | (scan_mode == "bf"))
| (not is_brightfield) & ((scan_mode == "all") | (scan_mode == "df"))):
# Variables for global update
dk_x_up = 0
dk_y_up = 0
# Initialize
dc_vals = np.zeros((scan_range_total, scan_range_total))
# Outer Loop, gradient steps
for gItr in np.arange(grad_iter):
# Inner loop, over dk_x and dk_y
for dkx_i in np.arange(scan_range_total).astype(np.int):
for dky_i in np.arange(scan_range_total).astype(np.int):
dkx = (-np.floor(scan_range_total / 2) + dkx_i).astype(np.int) + dk_x_up
dky = (-np.floor(scan_range_total / 2) + dky_i).astype(np.int) + dk_y_up
I_p = np.zeros(self.frame_list[img_idx, :, :].shape)
for l_idx in np.arange(np.size(self.cropystart, 1)):
I_p = I_p + np.abs(self.iF(self.objf[(dky + self.cropystart[img_idx][l_idx]):(dky + self.cropyend[img_idx][l_idx]),
(dkx + self.cropxstart[img_idx][l_idx]):(dkx + self.cropxend[img_idx][l_idx])] * self.pupil)) ** 2
I_m = self.frame_list[img_idx, :, :]
mean_m = max(np.mean(I_m), 1e-10)
mean_p = max(np.mean(I_p), 1e-10)
I_m = (I_m / mean_m) - 1.0
I_p = (I_p / | |
<gh_stars>0
"""
Author : <NAME>
Mail : <EMAIL>
Version : 0.1
Date : 04/12/2019
Update : 14/12/2019
Python : 3.6.5
Update Note : Arranging system of ODE methods and descriptions.
This script was written by @Author for personal use.
Prerequisite : numpy
"""
import numpy as np
class ODE():
"""
    This class implements numerical methods for Ordinary
    Differential Equations (ODEs).
@Methods :
- Euler
- Heun
- Midpoint
- RK2
- RK3
- RK4
- RK5
- System of ODE's Euler
- System of ODE's RK4
@Usage :
...
solver = ODE()
solver.@Methods
...
"""
def Euler(self, xi, xf, yi, h, dydx):
""" Euler Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.Euler(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next = yi + dydx(xi,yi) * h
xi += h
yi = y_next
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
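    # Usage sketch (assumed test problem): dy/dx = -2*y with y(0) = 1 on [0, 1]
    # and h = 0.1. Ten Euler steps give y(1) ~= 0.8**10 ~= 0.107, versus the
    # exact solution exp(-2) ~= 0.135:
    #   solver = ODE()
    #   xs, ys = solver.Euler(0, 1, 1.0, 0.1, lambda x, y: -2 * y)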
def SystemEuler(self, xi, xf, yi, h, dydx):
""" Euler Method for System of ODE.
@Note : yi and dydx should be array.
`Derivative functions parameter should be written
w.r.t args. Description in '@Args'.`
Arguments :
-------------
xi = Initial value of x for each function.
xf = Final value of x for each function.
yi = Initial value of y for each function.
h = Step size.
            dydx : Target functions' derivative functions,
                   whose arguments depend on args.
@Args :
                The order of the parameters must be the same for every function. \n
                If f1(x,y1,y2,...) and f2(x,y1,y2,...), then the functions' arguments should be packed in the array args = [x,y1,y2,...]. \n
@ Example :
dy1dx : -0.5x + y1
dy2dx : 0.2y1 + 0.6y2 - 3x
: First function x parameter (x) in args[0] and y
parameter (y1) in args[1]. \n
: Second function y
parameter (y2) in args[2].
def df1(args):
                    return (-0.5 * args[0] + args[1])
def df2(args):
                    return (0.2 * args[1] + 0.6 * args[2] - 3 * args[0])
...
solver = ODE()
solver.SystemEuler(0,5,[2,2],0.2,[df1,df2])
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr, args = np.array([xi]), np.array([yi]), []
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
args.append(xi)
for g in range(len(dydx)):
args.append(yi[g])
for j in range(len(dydx)):
yi[j] = yi[j] + dydx[j](args) * h
xi += h
x_arr = np.append(x_arr,[xi],0)
y_arr = np.append(y_arr,[yi],0)
args = []
return x_arr, y_arr
def Heun(self, xi, xf, yi, h, dydx):
""" Heun Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.Heun(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next_0 = yi + dydx(xi,yi) * h
y_next_1 = dydx(xi + h, y_next_0)
yi = yi + (dydx(xi,yi) + y_next_1) / 2 * h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def Midpoint(self, xi, xf, yi, h, dydx):
""" Midpoint Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.Midpoint(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
y_next_hl = yi + dydx(xi,yi) * h / 2
yi = yi + dydx(xi + h/2, y_next_hl) * h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK2(self, xi, xf, yi, h, a1, a2, p1, q11, dydx):
""" Second Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
a1, a2, p1, q11 = Calculation constants.
@Prop:
a1 + a2 = 1
                a2 * p1 = 1/2
                a2 * q11 = 1/2
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.RK2(0,5,2,0.2,1/2,1/2,1,1,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + p1 * h, yi + q11 * k1 * h)
yi = yi + (a1*k1 + a2*k2)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK3(self, xi, xf, yi, h, dydx):
""" Third Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.RK3(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + 1/2 * h, yi + 1/2 * k1 * h)
k3 = dydx(xi + h, yi - k1*h + 2*k2*h)
yi = yi + 1/6 * (k1 + 4*k2 + k3)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
def RK4(self, xi, xf, yi, h, dydx):
""" Fourth Order Runge Kutta Method for ODE.
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi = Initial value of y.
h = Step size.
            dydx : Target function's derivative function,
                   whose arguments are 'x and y'.
            @ Example :
                def df(x,y):
                    return (2 * x + y)
...
solver = ODE()
solver.RK4(0,5,2,0.2,df)
...
Return :
--------
x_arr, y_arr : Array of x and y point(s).
"""
x_arr, y_arr = [xi], [yi]
while (xi + h <= xf):
if (xi + h ) > xf:
h = xf - xi
k1 = dydx(xi, yi)
k2 = dydx(xi + 1/2 * h, yi + 1/2 * k1 * h)
k3 = dydx(xi + 1/2 * h, yi + 1/2 * k2 * h)
k4 = dydx(xi + h , yi + k3 * h)
yi = yi + 1/6 * (k1 + 2*k2 + 2*k3 + k4)*h
xi += h
x_arr.append(xi)
y_arr.append(yi)
return x_arr, y_arr
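    # For the same assumed test problem (dy/dx = -2*y, y(0) = 1, h = 0.1), ten RK4
    # steps give y(1) ~= 0.13534, matching exp(-2) to roughly five significant
    # digits, whereas the Euler sketch above reaches only ~0.107:
    #   xs, ys = solver.RK4(0, 1, 1.0, 0.1, lambda x, y: -2 * y)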
def SystemRK4(self, xi, xf, yi, h, dydx):
""" Forth Order Runge Kutta Method for System of ODE.
@Note : yi and dydx should be array.
`Derivative functions parameter should be written
w.r.t args. Description in '@Args'.`
Arguments :
-------------
xi = Initial value of x.
xf = Final value of x.
yi | |
<filename>build/lib/bilateralshapley/bilateralshapley.py
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 19 11:39:17 2017
@title: Bilateral Shapley Value
@author: <NAME>
"""
import networkx as nx
from operator import attrgetter
class BSV:
'''
    Tasks:
        1. Establish two network graphs based on agent attributes
        2. Execute the Bilateral Shapley Value
        3. Store results in three ways: a list of coalitions,
           a list of coalitions with attributes, and
           a dictionary of coalitions and their attributes.
    Purpose: Pass in the agents and specified attributes and run the
    bilateral Shapley value
'''
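    # Usage sketch (hypothetical agent class -- any objects exposing the named
    # attributes work; coalition names are dot-joined unique_id strings):
    #   class Agent:
    #       def __init__(self, uid, power, pref):
    #           self.unique_id, self.power, self.preference = uid, power, pref
    #   agents = [Agent(i, 10.0 + i, 0.1 * (i + 1)) for i in range(6)]
    #   model = BSV(agents, "power", "preference")
    #   print(model.result)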
def __init__(self, agents, power_attribute, preference_attribute,
efficiency =1.5, agent_id="unique_id",
compromise=0.95, verbose=True):
self.agent_list = agents
self.net = nx.Graph()
# create duplicate graph to reference for heirarchial networks
self.orignet = nx.Graph()
# determines when coalitions optimized
self.coalesced = False
self.efficiency = efficiency
if compromise > 1.0 or compromise < 0.0:
            raise ValueError("Compromise parameter must be a value between 0.0 \
and 1.0")
self.comp = compromise
# verbose parameter for print updates
self.verbose = verbose
# create empty dictionary for subnets
self.subnets = {}
# variable to make process more computationally efficient
self.most = 0
# use attrgetter to extract inputed strings
agent_id = attrgetter(agent_id)
power = attrgetter(power_attribute)
preference = attrgetter(preference_attribute)
#normalize preference
pref_list = []
for a in self.agent_list:
pref_list.append(preference(a))
max_pref = max(pref_list)
# add agents into nodes- requires two
# so can reference individual agent
for a in self.agent_list:
setattr(a, preference_attribute, preference(a)/max_pref)
node = agent_id(a)
# May need to undo later for final result
if isinstance(node, str):
pass
else:
node = str(node)
# primary
self.net.add_node(node)
# duplicate
self.orignet.add_node(node)
# primary
self.net.node[node]["power"] = power(a)
# duplicate
self.orignet.node[node]["power"] = power(a)
# primary
self.net.node[node]["preference"] = preference(a)
# duplicate
self.orignet.node[node]['preference'] = preference(a)
# primary
self.net.node[node]["maybe_mates"] = []
# duplicate
self.orignet.node[node]["maybe_mates"] = []
# primary
self.net.node[node]['g_dis'] = False
# duplicate
self.orignet.node[node]["g_dis"] = False
# Results storage objects
self.result = None
self.result_verbose = None
self.subresults = None
# Run the algorithm
self.execution()
'''
HELPER FUNCTION
1. Make subnet - Allows for detailed look of each coalition
'''
def make_subnets(self, newname, node1, node2, newpref):
'''
Tasks:
1. Make network graph of each coalition
2. Store each agents power and preference variables
'''
# empty list to track groups in subnetwork
group_list = []
# Get agent names out
start = 0
stop = 0
# Iterate through name to look for periods and append to group list
for l in range(len(newname)):
if newname[l] == '.':
stop = l
group = newname[start: stop]
group_list.append(group)
start = stop + 1
# get last name
group_list.append(newname[start:])
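        # (the loop above is equivalent to group_list = newname.split('.'))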
# print (tribename)
# print (tribe_list)
        # create the sub-network graph for the new coalition (old sub-networks are removed below)
self.subnets[newname] = nx.Graph()
# add each node with attributes
for each in group_list:
# calculate new preference
diff = self.orignet.node[each]['preference'] \
+ ((newpref - self.orignet.node[each]['preference'])
* self.comp)
self.subnets[newname].add_node(each)
self.subnets[newname].node[each]['preference'] = diff
self.orignet.node[each]['preference'] = diff
# update reference dictionary with
            # new affinity value based on alliances
self.subnets[newname].node[each]['power'] = \
self.orignet.node[each]['power']
self.subnets[newname].node[each]['maybe_mates'] = \
self.orignet.node[each]['maybe_mates']
self.subnets[newname].node[each]['g_dis'] = \
self.orignet.node[each]['g_dis']
if node1 in self.subnets.keys():
self.subnets.pop(node1)
if node2 in self.subnets.keys():
self.subnets.pop(node2)
'''
MAIN FUNCTIONS
1. assess_coalitions: determines best matches for every agent
2. make_alliance: find best option based on
all the combinations and form link
    3. new_node: create new nodes from aligned
       agents and remove them as stand-alone agents
'''
def assess_coalitions(self, network):
'''
Tasks:
1. As a node iterate through each node
2. assess the expected utility of a coalition formation
3. find the best possible match
4. store as an attirbute of the node
Purpose: To look over all combinations and find the best alliance
'''
# iterate over graph and create links with those most like you
for n1, d1 in network.nodes(data=True):
# reset maybe_mates for each round of coalition formation
d1["maybe_mates"] = []
# iterate over nodes to find allies
for n2, d2 in network.nodes(data=True):
# ensure nodes does not link to self
if n1 != n2:
# determine expected utility of alliance
pot_eu = ((d1['power'] + d2['power']) * self.efficiency) \
* (1 - (abs(d1['preference'] - d2['preference'])))
                    # determine bilateral Shapley value for both agents
shape1 = 0.5 * d1['power'] + 0.5 * (pot_eu - (d2['power']))
shape2 = 0.5 * d2['power'] + 0.5 * (pot_eu - (d1['power']))
# ensure no alliance is made which result
# in a decrease in either parties utility
if shape1 > d1['power'] and shape2 > d2['power']:
# if a coalition increases both utilities
# then add to list of node1
d1['maybe_mates'].append([shape1, n2, pot_eu])
            # sort list of possible alliances for highest Shapley value
d1['maybe_mates'].sort(key=lambda x: x[0], reverse=True)
            # ensures unnecessary calculations are prevented in the
            # make_alliance function (below) by tracking the max length of the candidate lists
if len(d1['maybe_mates']) > self.most:
self.most = len(d1['maybe_mates'])
# append index number (i.e. rank) of each tribe to list
for each in d1['maybe_mates']:
each.append(d1['maybe_mates'].index(each))
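        # Worked example (illustrative numbers): with efficiency = 1.5, powers 2 and 3
        # and preferences 0.4 and 0.6, the coalition value is (2 + 3) * 1.5 * (1 - 0.2)
        # = 6.0, giving bilateral Shapley values 0.5*2 + 0.5*(6 - 3) = 2.5 and
        # 0.5*3 + 0.5*(6 - 2) = 3.5; both exceed the stand-alone powers, so the
        # pairing is recorded as a candidate alliance.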
def make_alliance(self, network, level):
'''
Tasks:
1. iterate through each possible alliance
increasing index value (poss variable)
2. if best bet form link
Purpose: A computationally efficient way to find
best link for each node
'''
# empty list to keep track of nodes with edges
allied = []
# iterate through nodes
poss = 1
# keep creating links while value is shorter
# then the greatest number of possible alliances
while poss < self.most:
# iterate through nodes
for each in network.nodes(data=True):
# if in allied list pass (already has an edge)
if each[0] in allied:
pass
# look at data of potential allies
else:
# iterate through possible mates 1 index is name
for e in each[1]['maybe_mates']:
if e[3] <= poss:
# ensure node isnot already allied
if e[1] not in allied and each[0] not in allied:
# iterate through possible mates
for i in network.node[e[1]]["maybe_mates"]:
# see if node is a possible mate
if i[1] == each[0]:
# ensure less than current value
if network.node[e[1]]['maybe_mates'].\
index(i) <= poss:
# make alliance
network.add_edge(each[0], e[1])
# add both nodes to list
allied.append(each[0])
allied.append(e[1])
# increase value of poss to explore next best option
poss += 1
if level == 'two':
self.coalesced = True
# if no more alliances are made change variable to stop
if len(allied) == 0:
self.coalesced = True
def new_node(self, network):
'''
Tasks:
1. Calculate new agents based on those
agents who established a link
2. Form subnet of agents based on agents who joined
        3. Remove individual agents from the network
           who are not part of a coalition
        Purpose: Create hierarchies of networks
'''
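        # Worked example (same illustrative numbers as above): merging groups with
        # powers 2 and 3 and preferences 0.4 and 0.6 gives
        # newpref = (0.4*2 + 0.6*3) / (2 + 3) = 0.52 and newpow = (2 + 3) * 1.5 = 7.5.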
# iterate through each group which has an edge and put in list
new_nodes = []
new_pows = []
new_prefs = []
for one, two in network.edges():
# use aggressive caching
prefA = network.node[one]['preference']
prefB = network.node[two]['preference']
powA = network.node[one]['power']
powB = network.node[two]['power']
# calculate new preference
newpref = ((prefA * powA) + (prefB * powB))/(powA + powB)
new_prefs.append(newpref)
# calculate new power
newpow = (powA + powB) * self.efficiency
new_pows.append(newpow)
# new node name
new_nodes.append([one, two])
# iterate through list of alliances
for i in new_nodes:
# print (i)
# combine goroup names into a new name
newname = i[0] + "." + i[1]
# print (newname)
# get index of item
idx = new_nodes.index(i)
# add the new combined node
network.add_node(newname)
# add new power attribute
network.node[newname]['power'] = new_pows[idx]
# add new affinity attributes
network.node[newname]['preference'] = new_prefs[idx]
            # create empty possible mates key
network.node[newname]['maybe_mates'] = []
network.node[newname]['g_dis'] = 'False'
##################################################################
# SUBNET COMMAND
################################################################
# make new subnetwork
self.make_subnets(newname, i[0], i[1], new_prefs[idx])
# remove node from graph
network.remove_node(i[0])
network.remove_node(i[1])
def check_alliances(self, subs, nets):
'''
Tasks:
1. Determine if any memeber of a coalition wants to leave
2. Remove from coalition and add back into population
Purpose: Ensure each agent in the colation still wants to belong
'''
dis = []
c = 0
for key, sub in subs.items():
for group in sub.nodes(data=True):
# print (nets.node[key]['power'])
# print (list(sub.nodes()), group[0])
# determine potential utility for primary group
                # and each sub agent within the | |
-------
Signal S_out(i,j) corrected for memory effect
Notes
-----
* approximation first read-out is obviously wrong, but first readout is
not used anyway...
* error estimate not implemented
"""
if verbose:
print('(1) Perform memory correction (Reticon detectors)')
smr.errorType = 'A'
#
# read memory correction values
#
with h5py.File('/SCIA/share/nadc_tools/MEMcorr.h5', 'r') as fid:
dset = fid['MemTable']
memtbl = dset[:]
#
# apply memory correction
#
id_array = np.arange(smr.channelSize)
for nch in range(5):
ipx = id_array + nch * smr.channelSize
coaddf = smr.coaddf[ipx].max()
sign = np.rint(smr.spectra[0, ipx]).astype('uint16')
for nspec in range(smr.numSpectra):
corr = memtbl[nch, sign]
sign = np.rint(smr.spectra[nspec, ipx]).astype('uint16')
if coaddf > 1:
for _ in range(1, coaddf):
corr += memtbl[nch, sign]
corr /= coaddf
smr.spectra.data[nspec, ipx] -= corr
@staticmethod
def nonLinearity(smr, verbose=False):
"""
(2) Non-Linearity correction.
Parameters
----------
c_nlin : non-linearity correction parameters [i,j] (C1/C2)
Returns
-------
Signal S_out(i,j) corrected for non-linearity effect
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(2) Perform non-linearity correction (Epitaxx detectors)')
smr.errorType = 'A'
#
# read non-linearity correction values
#
with h5py.File('/SCIA/share/nadc_tools/NLcorr.h5', 'r') as fid:
dset = fid['CurveIndex']
curveIndex = dset[:]
dset = fid['nLinTable']
nlintbl = dset[:]
#
# apply non-linearity correction
#
id_array = np.arange(smr.channelSize)
for nch in range(5, 8):
pixelList = id_array + nch * smr.channelSize
curves = curveIndex[nch, id_array]
for nspec in range(smr.numSpectra):
sign = np.rint(smr.spectra[nspec, pixelList]).astype('uint16')
smr.spectra.data[nspec, pixelList] -= nlintbl[curves, sign]
@staticmethod
def backGround(smr, verbose=False):
"""
(3) Background Signal correction, consists of the dark current (DC)
and the thermal background (BG_term).
BS(i,j) = coaddf * pet * (DC + c_ice * QE * BG_therm)
Parameters
----------
coaddf : number of measurements co-added [i,j] (C3)
pet : pixel exposure time [i,j] (C3)
DC : dark current (C2/C3)
c_ice : transmission coefficient of the ice layer (C2)
QE : quantum efficiency of the detector (C1/C2)
BG_term : thermal background,
depends on T_det, T_opt, T_cal and T_grating (C3)
Returns
-------
Signal S_out(i,j) corrected for background signal (i,j)
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(3) Perform subtraction of dark signal')
smr.errorType = 'A'
# make a copy to correct Epitaxx PET without modifying the SMR object
pet = smr.pet.copy()
pet[5 * smr.channelSize:] -= 1.18125e-3
#
# read dark correction values
#
with h5py.File('/SCIA/SDMF31/sdmf_dark.h5', 'r') as fid:
grp = fid['/DarkFit']
dset = grp['metaTable']
mtbl = dset[:]
orbit = mtbl['absOrbit']
# reject these orbits
orbit[np.where(mtbl['stateCount'] < 3)] = 999999
metaIndx = np.argmin(orbit - smr.absOrbit)
dset = grp['analogOffset']
ao = dset[metaIndx, :]
dset = grp['darkCurrent']
lc = dset[metaIndx, :]
corr = ao + pet * lc
with h5py.File('/SCIA/SDMF30/sdmf_simudark.h5', 'r') as fid:
grp = fid['/ch8']
dset = grp['orbitList']
orbitList = dset[:]
metaIndx = np.argmin(abs(orbitList - smr.absOrbit))
dset = grp['metaTable']
mtbl = dset[:]
mtbl = dset[metaIndx]
dset = grp['ao']
ao = dset[:, metaIndx]
dset = grp['lc']
lc = dset[:, metaIndx]
dset = grp['amp1']
amp1 = dset[:, metaIndx]
orbvar = cos(2 * pi * (mtbl['PHASE1'] + smr.mtbl['orbitPhase'])) \
+ mtbl['AMP2'] * cos(4 * pi * (mtbl['PHASE2']
+ smr.mtbl['orbitPhase']))
# orbsig = cos(2 * pi * (mtbl['PHASE1'] + smr.mtbl['orbitPhase'])) \
# + mtbl['SIG_AMP2'] * cos(4 * pi * (mtbl['PHASE2']
# + smr.mtbl['orbitPhase']))
indx = 7 * smr.channelSize + np.arange(smr.channelSize)
corr[indx] = ao + pet[indx] * (lc + orbvar * amp1)
smr.spectra -= corr
#
# masked invalid pixels
#
i_masked = smr.spectra.mask.sum()
tmp = np.array([~(np.isfinite(corr))] * smr.numSpectra)
smr.spectra = ma.masked_where(tmp, smr.spectra, copy=False)
del tmp
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with invalid darks'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
smr.spectra = ma.masked_less(smr.spectra, 1, copy=False)
if verbose:
masked = smr.spectra.mask.sum()
print('* Info: masked %6.1f pixels/spectrum with too large darks'
% ((masked - i_masked) / float(smr.numSpectra)))
i_masked = masked
@staticmethod
def strayLight(smr, verbose=False):
"""
(4) Stray Light correction
Parameters
----------
M_stray : stray light correction matrix [i,j,x,y] (C1/C2)
S_in : rebin(S_in(i,j) + missing spectrum) [x,y]
Returns
-------
Signal S_out(i,j) corrected for stray light
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(4) Perform subtraction of spectral stray-light')
smr.errorType = 'A'
#
# read stray-light correction matrix
#
with h5py.File('/SCIA/share/nadc_tools/Straylight.h5', 'r') as fid:
dset = fid['strayMatrix']
strayMatrix = dset[:]
dset = fid['strayGhost']
strayGhost = dset[:]
dset = fid['grid_in']
grid_in = dset[:]
dset = fid['grid_out']
grid_out = dset[:]
# calculate derivative of grid_out
deriv_out = (np.roll(grid_out, -1) - np.roll(grid_out, 1))/2.
deriv_out[0] = (4 * grid_out[1] - 3 * grid_out[0] - grid_out[2])/2.
deriv_out[-1] = (3 * grid_out[-1] - 4 * grid_out[-2] + grid_out[-3])/2.
# obtain lower and upper indices for regridding, per channel
low_indx = np.zeros(grid_in.shape)
high_indx = np.zeros(grid_in.shape)
input_ch = np.floor(grid_in / smr.channelSize)
for nc in range(smr.numChannels):
w = (input_ch == nc)
grid_ch = grid_in[w]
# trick to omit round-off errors (fast - only integer manipulation)
ll = np.empty(grid_ch.shape, dtype=np.uint16)
ll[1:] = (grid_ch[:-1] + grid_ch[1:]).astype('uint16')
ll[(ll % 2) == 1] += 1
ll //= 2
ll[0] = nc * smr.channelSize
ul = np.roll(ll, -1)
ul[-1] = (nc+1) * smr.channelSize
low_indx[w] = ll
high_indx[w] = ul
# reduce the spectrum, according to grid_in
# scale_mask: compensates the reduced spectrum for masked read-outs
# fillings: compensates for the PET w.r.t. 1/16 sec
spec_r = np.zeros((smr.numSpectra, grid_in.shape[0]), dtype='float64')
for ni in range(grid_in.shape[0]):
num = ma.count(smr.spectra[:, low_indx[ni]:high_indx[ni]], axis=1)
num[num < 1] = 1
scale_mask = num / float(high_indx[ni] - low_indx[ni])
fillings = 16 * smr.pet[grid_in[ni]]
spec_r[:, ni] = smr.spectra[:,
low_indx[ni]:high_indx[ni]].sum(axis=1)
spec_r[:, ni] /= (fillings * scale_mask)
# print(ni, low_indx[ni], high_indx[ni], spec_r[120,ni],
# scale_mask[120], fillings, deriv_out[ni])
# reverse channel 2 (using the numpy 'view' method)
tmp = spec_r[:, (input_ch == 1)]
spec_r[:, (input_ch == 1)] = tmp[:, ::-1]
# obtain straylight spectrum
stray_r = np.dot(spec_r, np.transpose(strayMatrix))
# correct for sampling distance of the output grid
stray_r /= deriv_out
# resample straylight spectrum to SCIA spectrum
stray = np.zeros((smr.numSpectra, smr.numPixels), dtype='float64')
for ns in range(smr.numSpectra):
stray[ns, :] = np.interp(np.arange(smr.numPixels, dtype='float64'),
grid_out, stray_r[ns, :])
# blank out blinded pixels
stray[:, smr.blinded] = 0.
# scale to original PET of spectra
stray *= ((16 * smr.pet * smr.coaddf) / smr.coaddf.max())
# reverse channel 2 (using the numpy 'view' method)
tmp = stray[:, 1024:2048]
tmp[:, :] = tmp[:, ::-1]
# calculate stray-light contribution of the ghosts
ghosts = np.zeros((smr.numSpectra, smr.numPixels), dtype='float64')
for ng in range(strayGhost.shape[0]):
pp = np.arange(strayGhost[ng, 3], strayGhost[ng, 5], dtype=int)
pos = np.polyval(strayGhost[ng, 2::-1], pp)
fact = np.polyval(strayGhost[ng, 10:6:-1], pp)
mask = ((pos >= strayGhost[ng, 4]) & (pos <= strayGhost[ng, 6])
& (fact > 0))
pos = pos[mask]
ghost = fact[mask] * smr.spectra[:, pp[mask]]
if pos[0] > pos[-1]:
pos = pos[::-1]
ghost = ghost[:, ::-1]
pixels = np.arange(int(pos[0]), int(pos[-1]), dtype=int)
for ns in range(smr.numSpectra):
ghosts[ns, pixels] += np.interp(pixels, pos, ghost[ns, :])
# for ni in range(smr.numPixels):
# print(ni, smr.spectra[120,ni], stray[120,ni], ghosts[120,ni])
# blank out blinded pixels
ghosts[:, smr.blinded] = 0.
# subtract straylight from spectra
smr.spectra -= (stray + ghosts)
@staticmethod
def fitParam(smr, verbose=False):
"""
(5) Correct Sun (state 62) for the intensity change during the scan
Parameters
----------
* Read fit parameters from sun_fitpars.h5
Returns
-------
* Sun measurements for intensity change during scan (Diffuser/ESM)
Notes
-----
* error estimate not implemented
"""
if verbose:
print('(5) Perform correction for diffuser effects')
smr.errorType = 'M'
#
# read fit parameters for the correction of diffuser effects
#
with h5py.File('/SCIA/SDMF31/Auxiliary/sun_fitpars.h5', 'r') as fid:
dset = fid['fit_parameters']
fit_param = dset[:]
jd = smr.julianDay.mean() - fit_param[0, 0]
saa = smr.sunAzim[1:-1].mean() - fit_param[1, 0]
sza = smr.sunElev - fit_param[2, 0]
saa_grid = np.arange(fit_param[3, 0]) / (fit_param[3, 0] - 1) \
* (fit_param[5, 0]-fit_param[4, 0]) + fit_param[4, 0] \
- fit_param[1, 0]
sza_grid = np.arange(fit_param[6, 0]) / (fit_param[6, 0] - 1) \
* (fit_param[8, 0]-fit_param[7, 0]) + fit_param[7, 0] \
- fit_param[2, 0]
for ip in range(smr.numPixels):
if fit_param[0, ip] == 0:
continue
np0 = 19
np1 = 19 + fit_param[3, ip]
saa_lut = fit_param[np0:np1, ip]
np0 += fit_param[3, ip]
np1 += fit_param[6, ip]
sza_lut = fit_param[np0:np1, ip]
saa_val = np.interp(saa, saa_grid, saa_lut)
sza_val = np.interp(sza, sza_grid, sza_lut)
slope | |
min_tls_version)
if tls13 is not None:
pulumi.set(__self__, "tls13", tls13)
@property
@pulumi.getter
def ciphers(self) -> Optional[Sequence[str]]:
"""
List of SSL/TLS ciphers to associate with this certificate.
"""
return pulumi.get(self, "ciphers")
@property
@pulumi.getter
def http2(self) -> Optional[str]:
"""
Whether or not HTTP2 should be supported. Valid values are `"on"` or `"off"`.
"""
return pulumi.get(self, "http2")
@property
@pulumi.getter(name="minTlsVersion")
def min_tls_version(self) -> Optional[str]:
"""
Lowest version of TLS this certificate should
support. Valid values are `"1.0"`, `"1.1"`, `"1.2"` and `"1.3"`.
"""
return pulumi.get(self, "min_tls_version")
@property
@pulumi.getter
def tls13(self) -> Optional[str]:
"""
Whether or not TLSv1.3 should be supported. Valid values are `"on"` or `"off"`.
"""
return pulumi.get(self, "tls13")
@pulumi.output_type
class CustomSslCustomSslOptions(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bundleMethod":
suggest = "bundle_method"
elif key == "geoRestrictions":
suggest = "geo_restrictions"
elif key == "privateKey":
suggest = "private_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in CustomSslCustomSslOptions. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
CustomSslCustomSslOptions.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
CustomSslCustomSslOptions.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bundle_method: Optional[str] = None,
certificate: Optional[str] = None,
geo_restrictions: Optional[str] = None,
private_key: Optional[str] = None,
type: Optional[str] = None):
"""
:param str bundle_method: Method of building intermediate certificate chain. A ubiquitous bundle has the highest probability of being verified everywhere, even by clients using outdated or unusual trust stores. An optimal bundle uses the shortest chain and newest intermediates. And the force bundle verifies the chain, but does not otherwise modify it. Valid values are `ubiquitous` (default), `optimal`, `force`.
:param str certificate: Certificate certificate and the intermediate(s)
:param str geo_restrictions: Specifies the region where your private key can be held locally. Valid values are `us`, `eu`, `highest_security`.
:param str private_key: Certificate's private key
:param str type: Whether to enable support for legacy clients which do not include SNI in the TLS handshake. Valid values are `legacy_custom` (default), `sni_custom`.
"""
if bundle_method is not None:
pulumi.set(__self__, "bundle_method", bundle_method)
if certificate is not None:
pulumi.set(__self__, "certificate", certificate)
if geo_restrictions is not None:
pulumi.set(__self__, "geo_restrictions", geo_restrictions)
if private_key is not None:
pulumi.set(__self__, "private_key", private_key)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="bundleMethod")
def bundle_method(self) -> Optional[str]:
"""
Method of building intermediate certificate chain. A ubiquitous bundle has the highest probability of being verified everywhere, even by clients using outdated or unusual trust stores. An optimal bundle uses the shortest chain and newest intermediates. And the force bundle verifies the chain, but does not otherwise modify it. Valid values are `ubiquitous` (default), `optimal`, `force`.
"""
return pulumi.get(self, "bundle_method")
@property
@pulumi.getter
def certificate(self) -> Optional[str]:
"""
Certificate certificate and the intermediate(s)
"""
return pulumi.get(self, "certificate")
@property
@pulumi.getter(name="geoRestrictions")
def geo_restrictions(self) -> Optional[str]:
"""
Specifies the region where your private key can be held locally. Valid values are `us`, `eu`, `highest_security`.
"""
return pulumi.get(self, "geo_restrictions")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> Optional[str]:
"""
Certificate's private key
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Whether to enable support for legacy clients which do not include SNI in the TLS handshake. Valid values are `legacy_custom` (default), `sni_custom`.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class CustomSslCustomSslPriority(dict):
def __init__(__self__, *,
id: Optional[str] = None,
priority: Optional[int] = None):
if id is not None:
pulumi.set(__self__, "id", id)
if priority is not None:
pulumi.set(__self__, "priority", priority)
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter
def priority(self) -> Optional[int]:
return pulumi.get(self, "priority")
@pulumi.output_type
class DevicePostureRuleInput(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "requireAll":
suggest = "require_all"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DevicePostureRuleInput. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DevicePostureRuleInput.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DevicePostureRuleInput.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
domain: Optional[str] = None,
enabled: Optional[bool] = None,
exists: Optional[bool] = None,
id: Optional[str] = None,
operator: Optional[str] = None,
path: Optional[str] = None,
require_all: Optional[bool] = None,
running: Optional[bool] = None,
sha256: Optional[str] = None,
thumbprint: Optional[str] = None,
version: Optional[str] = None):
"""
:param str domain: = (Required) The domain that the client must join.
:param bool enabled: = (Required) True if the firewall must be enabled.
:param bool exists: Checks if the file should exist.
:param str id: The Teams List id.
:param str operator: = (Required) The version comparison operator in (>,>=,<,<=,==)
:param str path: The path to the application.
:param bool require_all: = (Required) True if all drives must be encrypted.
:param bool running: Checks if the application should be running.
:param str sha256: The sha256 hash of the file.
:param str thumbprint: The thumbprint of the application certificate.
:param str version: = (Required) The operating system semantic version.
"""
if domain is not None:
pulumi.set(__self__, "domain", domain)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if exists is not None:
pulumi.set(__self__, "exists", exists)
if id is not None:
pulumi.set(__self__, "id", id)
if operator is not None:
pulumi.set(__self__, "operator", operator)
if path is not None:
pulumi.set(__self__, "path", path)
if require_all is not None:
pulumi.set(__self__, "require_all", require_all)
if running is not None:
pulumi.set(__self__, "running", running)
if sha256 is not None:
pulumi.set(__self__, "sha256", sha256)
if thumbprint is not None:
pulumi.set(__self__, "thumbprint", thumbprint)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def domain(self) -> Optional[str]:
"""
= (Required) The domain that the client must join.
"""
return pulumi.get(self, "domain")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
= (Required) True if the firewall must be enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def exists(self) -> Optional[bool]:
"""
Checks if the file should exist.
"""
return pulumi.get(self, "exists")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The Teams List id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def operator(self) -> Optional[str]:
"""
= (Required) The version comparison operator in (>,>=,<,<=,==)
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
The path to the application.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter(name="requireAll")
def require_all(self) -> Optional[bool]:
"""
= (Required) True if all drives must be encrypted.
"""
return pulumi.get(self, "require_all")
@property
@pulumi.getter
def running(self) -> Optional[bool]:
"""
Checks if the application should be running.
"""
return pulumi.get(self, "running")
@property
@pulumi.getter
def sha256(self) -> Optional[str]:
"""
The sha256 hash of the file.
"""
return pulumi.get(self, "sha256")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
The thumbprint of the application certificate.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def version(self) -> Optional[str]:
"""
= (Required) The operating system semantic version.
"""
return pulumi.get(self, "version")
@pulumi.output_type
class DevicePostureRuleMatch(dict):
def __init__(__self__, *,
platform: Optional[str] = None):
"""
:param str platform: The platform of the device. Valid values are `windows`, `mac`, `linux`, `android`, and `ios`.
"""
if platform is not None:
pulumi.set(__self__, "platform", platform)
@property
@pulumi.getter
def platform(self) -> Optional[str]:
"""
The platform of the device. Valid values are `windows`, `mac`, `linux`, `android`, and `ios`.
"""
return pulumi.get(self, "platform")
@pulumi.output_type
class HealthcheckHeader(dict):
def __init__(__self__, *,
header: str,
values: Sequence[str]):
"""
:param str header: The header name.
:param Sequence[str] values: A list of string values for the header.
"""
pulumi.set(__self__, "header", header)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def header(self) -> str:
"""
The header name.
"""
return pulumi.get(self, "header")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
A list of string values for the header.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class IpListItem(dict):
def __init__(__self__, *,
value: str,
comment: Optional[str] = None):
"""
:param str value: The IPv4 address, IPv4 CIDR or IPv6 CIDR. IPv6 CIDRs are limited to a maximum of /64.
:param str comment: A note that can be used to annotate the item.
"""
pulumi.set(__self__, "value", value)
if comment is not None:
pulumi.set(__self__, "comment", comment)
@property
@pulumi.getter
def value(self) -> str:
"""
The IPv4 address, IPv4 CIDR or IPv6 CIDR. IPv6 CIDRs are limited to a maximum of /64.
"""
return pulumi.get(self, "value")
@property
@pulumi.getter
def comment(self) -> Optional[str]:
"""
A note that can be used to | |
import hashlib
import itertools
import logging
from django.utils.translation import ugettext as _
import psycopg2
import sqlalchemy
from memoized import memoized
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Index, PrimaryKeyConstraint
from corehq.apps.userreports.adapter import IndicatorAdapter
from corehq.apps.userreports.exceptions import (
ColumnNotFoundError,
TableRebuildError,
translate_programming_error,
)
from corehq.apps.userreports.sql.columns import column_to_sql
from corehq.apps.userreports.util import get_table_name
from corehq.sql_db.connections import connection_manager
from corehq.util.soft_assert import soft_assert
from corehq.util.test_utils import unit_testing_only
logger = logging.getLogger(__name__)
engine_metadata = {}
def get_metadata(engine_id):
return engine_metadata.setdefault(engine_id, sqlalchemy.MetaData())
class IndicatorSqlAdapter(IndicatorAdapter):
def __init__(self, config, override_table_name=None, engine_id=None):
super(IndicatorSqlAdapter, self).__init__(config)
self.engine_id = engine_id or config.engine_id
self.session_helper = connection_manager.get_session_helper(self.engine_id)
self.session_context = self.session_helper.session_context
self.engine = self.session_helper.engine
self.override_table_name = override_table_name
@property
def table_id(self):
return self.config.table_id
@property
def display_name(self):
return self.config.display_name
@memoized
def get_table(self):
return get_indicator_table(
self.config, get_metadata(self.engine_id), override_table_name=self.override_table_name
)
@property
def table_exists(self):
return self.engine.has_table(self.get_table().name)
@memoized
def get_sqlalchemy_orm_table(self):
table = self.get_table()
Base = declarative_base(metadata=get_metadata(self.engine_id))
class TemporaryTableDef(Base):
__table__ = table
return TemporaryTableDef
def _apply_sql_addons(self):
if self.config.sql_settings.citus_config.distribution_type:
self._distribute_table()
def _distribute_table(self):
config = self.config.sql_settings.citus_config
self.session_helper.Session.remove()
if not self.session_helper.is_citus_db:
            # skip distribution unless the database has the citus extension
return
from custom.icds_reports.utils.migrations import (
create_citus_distributed_table, create_citus_reference_table
)
with self.engine.begin() as connection:
if config.distribution_type == 'hash':
if config.distribution_column not in self.get_table().columns:
raise ColumnNotFoundError("Column '{}' not found.".format(config.distribution_column))
create_citus_distributed_table(connection, self.get_table().name, config.distribution_column)
elif config.distribution_type == 'reference':
create_citus_reference_table(connection, self.get_table().name)
else:
raise ValueError("unknown distribution type: %r" % config.distribution_type)
return True
def rebuild_table(self, initiated_by=None, source=None, skip_log=False):
self.log_table_rebuild(initiated_by, source, skip=skip_log)
self.session_helper.Session.remove()
try:
rebuild_table(self.engine, self.get_table())
self._apply_sql_addons()
except ProgrammingError as e:
raise TableRebuildError('problem rebuilding UCR table {}: {}'.format(self.config, e))
finally:
self.session_helper.Session.commit()
def build_table(self, initiated_by=None, source=None):
self.log_table_build(initiated_by, source)
self.session_helper.Session.remove()
try:
build_table(self.engine, self.get_table())
self._apply_sql_addons()
except ProgrammingError as e:
raise TableRebuildError('problem building UCR table {}: {}'.format(self.config, e))
finally:
self.session_helper.Session.commit()
def drop_table(self, initiated_by=None, source=None, skip_log=False):
self.log_table_drop(initiated_by, source, skip_log)
# this will hang if there are any open sessions, so go ahead and close them
self.session_helper.Session.remove()
with self.engine.begin() as connection:
table = self.get_table()
table.drop(connection, checkfirst=True)
get_metadata(self.engine_id).remove(table)
@unit_testing_only
def clear_table(self):
table = self.get_table()
with self.engine.begin() as connection:
delete = table.delete()
connection.execute(delete)
def get_query_object(self):
"""
Get a sqlalchemy query object ready to query this table
"""
return self.session_helper.Session.query(self.get_table())
def get_distinct_values(self, column, limit):
too_many_values = False
table = self.get_table()
if not table.exists(bind=self.engine):
return [], False
if column not in table.c:
raise ColumnNotFoundError(_(
'The column "{}" does not exist in the report source! '
'Please double check your report configuration.').format(column)
)
column = table.c[column]
query = self.session_helper.Session.query(column).order_by(column).limit(limit + 1).distinct()
result = query.all()
distinct_values = [x[0] for x in result]
if len(distinct_values) > limit:
distinct_values = distinct_values[:limit]
too_many_values = True
return distinct_values, too_many_values
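    # Illustrative example (hypothetical data): for a column containing the
    # values 'a', 'b' and 'c', get_distinct_values(column, limit=10) returns
    # (['a', 'b', 'c'], False), while limit=2 returns (['a', 'b'], True) to
    # signal that the limit was exceeded.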
def _best_effort_save_rows(self, rows, doc):
try:
self.save_rows(rows)
except Exception as e:
self.handle_exception(doc, e)
def save_rows(self, rows):
"""
Saves rows to a data source after deleting the old rows
"""
if not rows:
return
# transform format from ColumnValue to dict
formatted_rows = [
{i.column.database_column_name.decode('utf-8'): i.value for i in row}
for row in rows
]
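        # e.g. formatted_rows might look like
        # [{'doc_id': 'abc123', 'some_indicator': 42}, ...] (column names here are hypothetical)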
if self.session_helper.is_citus_db:
config = self.config.sql_settings.citus_config
if config.distribution_type == 'hash':
self._by_column_update(formatted_rows)
return
doc_ids = set(row['doc_id'] for row in formatted_rows)
table = self.get_table()
if self.supports_upsert():
queries = [self._upsert_query(table, formatted_rows)]
else:
delete = table.delete().where(table.c.doc_id.in_(doc_ids))
            # Using session.bulk_insert_mappings below might seem more in line
            # with the sqlalchemy API, but it appends an empty row, which causes
            # a postgres not-null constraint error that has been hard to debug.
            # In addition, bulk_insert_mappings is less performant than the
            # plain INSERT INTO ... VALUES statement produced by the line below,
            # because bulk_insert_mappings is meant for multi-table insertion
            # and so carries the overhead of format conversions and multiple
            # statements.
insert = table.insert().values(formatted_rows)
queries = [delete, insert]
with self.session_context() as session:
for query in queries:
session.execute(query)
def _by_column_update(self, rows):
config = self.config.sql_settings.citus_config
shard_col = config.distribution_column
table = self.get_table()
rows = sorted(rows, key=lambda row: row[shard_col])
for shard_value, rows_ in itertools.groupby(rows, key=lambda row: row[shard_col]):
formatted_rows = list(rows_)
doc_ids = set(row['doc_id'] for row in formatted_rows)
if self.supports_upsert():
queries = [self._upsert_query(table, formatted_rows)]
else:
delete = table.delete().where(table.c.get(shard_col) == shard_value)
delete = delete.where(table.c.doc_id.in_(doc_ids))
insert = table.insert().values(formatted_rows)
queries = [delete, insert]
with self.session_context() as session:
for query in queries:
session.execute(query)
def supports_upsert(self):
"""Return True if supports UPSERTS else False
Assumes that neither a distribution column (citus) nor doc_id can change.
"""
if self.session_helper.is_citus_db:
# distribution_column and doc_id
return len(self.config.pk_columns) == 2
# doc_id
return len(self.config.pk_columns) == 1
def _upsert_query(self, table, rows):
from sqlalchemy.dialects.postgresql import insert
upsert = insert(table).values(rows)
return upsert.on_conflict_do_update(
constraint=table.primary_key,
set_={
col.name: col for col in upsert.excluded if not col.primary_key
}
)
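    # The statement produced by _upsert_query is roughly of the form (sketch;
    # the exact SQL is generated by sqlalchemy's postgresql dialect):
    #   INSERT INTO <table> (...) VALUES (...)
    #   ON CONFLICT ON CONSTRAINT <primary key constraint>
    #   DO UPDATE SET <col> = excluded.<col>, ...   -- for every non-PK column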
def bulk_save(self, docs):
rows = []
for doc in docs:
rows.extend(self.get_all_values(doc))
self.save_rows(rows)
def bulk_delete(self, docs):
if self.session_helper.is_citus_db:
config = self.config.sql_settings.citus_config
if config.distribution_type == 'hash':
self._citus_bulk_delete(docs, config.distribution_column)
return
table = self.get_table()
doc_ids = [doc['_id'] for doc in docs]
delete = table.delete(table.c.doc_id.in_(doc_ids))
with self.session_context() as session:
session.execute(delete)
def _citus_bulk_delete(self, docs, column):
"""
        When a delete is run on a distributed table, it grabs an exclusive write
lock on the entire table unless the shard column is also provided.
This function performs extra work to get the shard column so we are not
blocked on deletes.
"""
# these doc types were blocking the queue but the approach could be applied
# more generally with some more testing
SHARDABLE_DOC_TYPES = ('XFormArchived', 'XFormDeprecated', 'XFormDuplicate', 'XFormError')
table = self.get_table()
doc_ids_to_delete = []
for doc in docs:
if doc.get('doc_type') in SHARDABLE_DOC_TYPES:
# get_all_values ignores duplicate and archived forms because
# the implicit filtering on all data sources filters doc_types
# get_all_values saves no changes to the original doc database
# so we change the doc_type locally to get the sharded column
tmp_doc = doc.copy()
tmp_doc['doc_type'] = 'XFormInstance'
rows = self.get_all_values(tmp_doc)
if rows:
first_row = rows[0]
sharded_column_value = [
i.value for i in first_row
if i.column.database_column_name.decode('utf-8') == column
]
if sharded_column_value:
delete = table.delete().where(table.c.doc_id == doc['_id'])
delete = delete.where(table.c.get(column) == sharded_column_value[0])
with self.session_context() as session:
session.execute(delete)
continue # skip adding doc ID into doc_ids_to_delete
doc_ids_to_delete.append(doc['_id'])
if doc_ids_to_delete:
delete = table.delete().where(table.c.doc_id.in_(doc_ids_to_delete))
with self.session_context() as session:
session.execute(delete)
def delete(self, doc):
self.bulk_delete([doc])
def doc_exists(self, doc):
with self.session_context() as session:
query = session.query(self.get_table()).filter_by(doc_id=doc['_id'])
return session.query(query.exists()).scalar()
class MultiDBSqlAdapter(object):
mirror_adapter_cls = IndicatorSqlAdapter
def __init__(self, config, override_table_name=None):
config.validate_db_config()
self.config = config
self.main_adapter = self.mirror_adapter_cls(config, override_table_name)
self.all_adapters = [self.main_adapter]
engine_ids = self.config.mirrored_engine_ids
for engine_id in engine_ids:
self.all_adapters.append(self.mirror_adapter_cls(config, override_table_name, engine_id))
def __getattr__(self, attr):
return getattr(self.main_adapter, attr)
@property
def table_id(self):
return self.config.table_id
@property
def display_name(self):
return self.config.display_name
def best_effort_save(self, doc, eval_context=None):
for adapter in self.all_adapters:
adapter.best_effort_save(doc, eval_context)
def save(self, doc, eval_context=None):
for adapter in self.all_adapters:
adapter.save(doc, eval_context)
def get_all_values(self, doc, eval_context=None):
return self.config.get_all_values(doc, eval_context)
@property
def run_asynchronous(self):
return self.config.asynchronous
def get_distinct_values(self, column, limit):
return self.main_adapter.get_distinct_values(column, limit)
def build_table(self, initiated_by=None, source=None):
for adapter in self.all_adapters:
adapter.build_table(initiated_by=initiated_by, source=source)
def rebuild_table(self, initiated_by=None, source=None, skip_log=False):
for adapter in self.all_adapters:
adapter.rebuild_table(initiated_by=initiated_by, source=source, skip_log=skip_log)
def drop_table(self, initiated_by=None, source=None, skip_log=False):
for adapter in self.all_adapters:
adapter.drop_table(initiated_by=initiated_by, source=source, skip_log=skip_log)
@unit_testing_only
def clear_table(self):
for adapter in self.all_adapters:
adapter.clear_table()
def save_rows(self, rows):
for adapter in self.all_adapters:
adapter.save_rows(rows)
def bulk_save(self, docs):
for adapter in self.all_adapters:
adapter.bulk_save(docs)
def bulk_delete(self, docs):
for adapter in self.all_adapters:
adapter.bulk_delete(docs)
def doc_exists(self, doc):
return any([
adapter.doc_exists(doc)
for adapter in self.all_adapters
])
class ErrorRaisingIndicatorSqlAdapter(IndicatorSqlAdapter):
def handle_exception(self, doc, exception):
ex = translate_programming_error(exception)
if ex is not None:
raise ex
orig_exception = getattr(exception, 'orig', None)
if orig_exception and isinstance(orig_exception, psycopg2.IntegrityError):
if orig_exception.pgcode == psycopg2.errorcodes.NOT_NULL_VIOLATION:
from corehq.apps.userreports.models import InvalidUCRData
InvalidUCRData.objects.create(
doc_id=doc['_id'],
doc_type=doc['doc_type'],
domain=doc['domain'],
indicator_config_id=self.config._id,
validation_name='not_null_violation',
validation_text='A column in this doc violates an is_nullable constraint'
)
return
super(ErrorRaisingIndicatorSqlAdapter, self).handle_exception(doc, exception)
class ErrorRaisingMultiDBAdapter(MultiDBSqlAdapter):
mirror_adapter_cls = ErrorRaisingIndicatorSqlAdapter
def get_indicator_table(indicator_config, metadata, override_table_name=None):
sql_columns = [column_to_sql(col) for col in indicator_config.get_columns()]
table_name = override_table_name or get_table_name(indicator_config.domain, indicator_config.table_id)
columns_by_col_id = {col.database_column_name.decode('utf-8') for col in indicator_config.get_columns()}
extra_indices = []
citus_config = indicator_config.sql_settings.citus_config
if citus_config.distribution_type == 'hash':
# Create hash index on doc_id for distributed tables
extra_indices.append(Index(
_custom_index_name(table_name, ['doc_id']),
'doc_id',
postgresql_using='hash'
))
for index in indicator_config.sql_column_indexes:
if set(index.column_ids).issubset(columns_by_col_id):
extra_indices.append(Index(
_custom_index_name(table_name, index.column_ids),
*index.column_ids
))
else:
logger.error(f"Invalid index specified on {table_name} ({index.column_ids})")
constraints = [PrimaryKeyConstraint(*indicator_config.pk_columns)]
columns_and_indices = sql_columns + extra_indices + constraints
# todo: needed to add extend_existing=True to support multiple calls to this function for the same table.
# is that valid?
return sqlalchemy.Table(
table_name,
metadata,
extend_existing=True,
*columns_and_indices
)
def _custom_index_name(table_name, column_ids):
base_name = "ix_{}_{}".format(table_name, ','.join(column_ids))
base_hash = hashlib.md5(base_name.encode('utf-8')).hexdigest()
return "{}_{}".format(base_name[:50], base_hash[:5])
def rebuild_table(engine, table):
with engine.begin() as connection:
table.drop(connection, checkfirst=True)
| |
# ===============================
# author : <NAME>
# contact: <EMAIL>
# github : github.com/mrluin
# ===============================
import math
import torch
import os
import time
import logging
import json
from exp.sufficient_update.run_manager import *
from utils.common import set_manual_seed
from utils.common import AverageMeter
from utils.common import get_monitor_metric
from utils.logger import save_checkpoint, time_string
from utils.metrics import Evaluator
from modules.mixed_op import MixedEdge
from utils.common import count_parameters
from utils.common import get_list_index
from models.new_model import NewNetwork
__all__ = ['ArchSearchConfig', 'ArchSearchRunManager']
class ArchSearchConfig:
def __init__(self,
arch_init_type, arch_init_ratio,
warmup_lr,
arch_optimizer_type, arch_lr, arch_optimizer_params, arch_weight_decay,
tau_min, tau_max,
#target_hardware, ref_value,
arch_param_update_frequency, arch_param_update_steps, sample_arch_frequency,
reg_loss_type,
reg_loss_params, **kwargs):
# optimizer
self.arch_init_type = arch_init_type
self.arch_init_ratio = arch_init_ratio
self.arch_optimizer_type = arch_optimizer_type
self.arch_lr = arch_lr
self.arch_optimizer_params = arch_optimizer_params
self.arch_weight_decay = arch_weight_decay
self.warmup_lr = warmup_lr
self.tau_min = tau_min
self.tau_max = tau_max
# update
self.arch_param_update_frequency = arch_param_update_frequency
self.arch_param_update_steps = arch_param_update_steps
self.sample_arch_frequency = sample_arch_frequency
        # loss related to hardware constraint
self.reg_loss_type = reg_loss_type
self.reg_loss_params = reg_loss_params
# TODO: related computational constraints
# TODO: get rid of
#self.target_hardware = target_hardware
#self.ref_value = ref_value
@property
def config(self):
config = {
#'type' : type(self),
}
for key in self.__dict__:
if not key.startswith('_'):
config[key] = self.__dict__[key]
return config
def get_update_schedule(self, iter_per_epoch):
schedule = {}
for i in range(iter_per_epoch):
if (i+1) % self.sample_arch_frequency == 0:
                schedule[i] = self.arch_param_update_steps  # at iteration i, update arch_param self.arch_param_update_steps times.
return schedule
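    # Illustrative example: with sample_arch_frequency=2 and
    # arch_param_update_steps=1, an epoch of 6 iterations yields
    # {1: 1, 3: 1, 5: 1}, i.e. the architecture parameters are updated once
    # after every second iteration.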
def build_optimizer(self, params):
if self.arch_optimizer_type == 'adam':
return torch.optim.Adam(params, self.arch_lr, weight_decay=self.arch_weight_decay, **self.arch_optimizer_params)
        else: raise ValueError('only torch.optim.Adam is supported as the architecture optimizer')
'''
def add_regularization_loss(self, ce_loss, expected_value=None):
# TODO: related hardware constrain, get rid of.
if expected_value is None:
return ce_loss
if self.reg_loss_type == 'mul#log ':
alpha = self.reg_loss_params.get('alpha', 1)
beta = self.reg_loss_params.get('beta', 0.6)
reg_loss = (torch.log(expected_value) / math.log(self.ref_value)) ** beta
return alpha * ce_loss * reg_loss
elif self.reg_loss_type == 'add#linear':
reg_lambda = self.reg_loss_params.get('lambda', 2e-1)
reg_loss = reg_lambda * (expected_value - self.ref_value) / self.ref_value
return ce_loss + reg_loss
elif self.reg_loss_type is None:
return ce_loss
else:
raise ValueError('do not support {}'.format(self.reg_loss_type))
'''
class ArchSearchRunManager:
def __init__(self,
path, super_net,
run_config: RunConfig,
arch_search_config: ArchSearchConfig,
logger, vis=None):
self.run_manager = RunManager(path, super_net, logger, run_config, out_log=True)
self.arch_search_config = arch_search_config
        # arch_parameter init has been implemented in SuperNetwork; initialization is performed when the network is constructed.
# init architecture parameters
self.net.init_arch_params(
self.arch_search_config.arch_init_type,
self.arch_search_config.arch_init_ratio
)
# build architecture optimizer
self.arch_optimizer = self.arch_search_config.build_optimizer(self.net.arch_parameters())
self.warmup = True
self.warmup_epoch = 0
self.start_epoch = 0 # start_epoch, warmup_epoch, and total_epoch
self.logger = logger
self.vis = vis
# for update arch_parameters
@property
def net(self):
return self.run_manager.model
def load_model(self, checkpoint_file=None):
# only used in nas_manager
assert checkpoint_file is not None and os.path.exists(checkpoint_file), \
'checkpoint_file can not be found'
if self.run_manager.out_log:
print('=' * 30 + '=>\tLoading Checkpoint {}'.format(checkpoint_file))
if torch.cuda.is_available():
checkpoint = torch.load(checkpoint_file)
else:
checkpoint = torch.load(checkpoint_file, map_location='cpu')
model_dict = self.net.state_dict()
model_dict.update(checkpoint['state_dict'])
self.net.load_state_dict(model_dict)
# TODO: why set new manual seed
new_manual_seed = int(time.time())
set_manual_seed(new_manual_seed)
self.start_epoch = checkpoint['start_epochs']
self.monitor_metric, self.best_monitor = checkpoint['best_monitor']
self.run_manager.optimizer.load_state_dict(checkpoint['weight_optimizer'])
scheduler_dict = self.run_manager.scheduler.state_dict()
scheduler_dict.update(checkpoint['weight_scheduler'])
self.run_manager.scheduler.load_state_dict(scheduler_dict)
self.arch_optimizer.load_state_dict(checkpoint['arch_optimizer'])
        self.warmup = checkpoint['warmup']
if self.run_manager.out_log:
print('=' * 30 + '=>\tLoaded Checkpoint {}'.format(checkpoint_file))
def save_model(self, epoch, is_warmup, is_best, checkpoint_file_name):
        # TODO: whether it has an arch_scheduler or not
checkpoint = {
'arch_optimizer': self.arch_optimizer.state_dict(),
#'arch_scheduler': self.arch_scheduler.state_dict(), # does not have arch_scheduler
}
# saved in /train_search/ckpt_file
self.run_manager.save_model(epoch, checkpoint, is_warmup=is_warmup, is_best=is_best, checkpoint_file_name=checkpoint_file_name)
''' training related methods '''
def validate(self):
        # TODO: use the validate method in run_manager after performing model derivation.
        # This is for the validation phase, not the train-search phase; it only performs validation.
        # valid_loader batch_size = test_batch_size
        # (already equal to test_batch_size in DataProvider)
self.run_manager.run_config.valid_loader.batch_sampler.batch_size = self.run_manager.run_config.train_batch_size
self.run_manager.run_config.valid_loader.batch_sampler.drop_last = False
# TODO: network_level and cell_level decode in net_flops() method
# TODO: valid on validation set (from training set) under train mode
# only have effect on operation related to train mode
# like bn or dropout
loss, acc, miou, fscore = self.run_manager.validate(is_test=False, net=self.net, use_train_mode=True)
# TODO: network flops and network param count should be calculated after the best network derived.
# flops = self.run_manager.net_flops()
# params = count_parameters(self.net)
# target_hardware is None by default
        if getattr(self.arch_search_config, 'target_hardware', None) in ['flops', None]:  # attribute may have been removed above
latency = 0
else:
raise NotImplementedError
return loss, acc, miou, fscore, #flops, params
def warm_up(self, warmup_epochs):
        if warmup_epochs <= 0:
self.logger.log('=> warmup close', mode='warm')
#print('\twarmup close')
return
# set optimizer and scheduler in warm_up phase
lr_max = self.arch_search_config.warmup_lr
data_loader = self.run_manager.run_config.train_loader
scheduler_params = self.run_manager.run_config.optimizer_config['scheduler_params']
optimizer_params = self.run_manager.run_config.optimizer_config['optimizer_params']
momentum, nesterov, weight_decay = optimizer_params['momentum'], optimizer_params['nesterov'], optimizer_params['weight_decay']
eta_min = scheduler_params['eta_min']
optimizer_warmup = torch.optim.SGD(self.net.weight_parameters(), lr_max, momentum, weight_decay=weight_decay, nesterov=nesterov)
# set initial_learning_rate in weight_optimizer
#for param_groups in self.run_manager.optimizer.param_groups:
# param_groups['lr'] = lr_max
lr_scheduler_warmup = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer_warmup, warmup_epochs, eta_min)
iter_per_epoch = len(data_loader)
total_iteration = warmup_epochs * iter_per_epoch
self.logger.log('=> warmup begin', mode='warm')
epoch_time = AverageMeter()
end_epoch = time.time()
for epoch in range(self.warmup_epoch, warmup_epochs):
self.logger.log('\n'+'-'*30+'Warmup Epoch: {}'.format(epoch+1)+'-'*30+'\n', mode='warm')
lr_scheduler_warmup.step(epoch)
warmup_lr = lr_scheduler_warmup.get_lr()
self.net.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
accs = AverageMeter()
mious = AverageMeter()
fscores = AverageMeter()
epoch_str = 'epoch[{:03d}/{:03d}]'.format(epoch + 1, warmup_epochs)
time_left = epoch_time.average * (warmup_epochs - epoch)
common_log = '[Warmup the {:}] Left={:} LR={:}'.format(epoch_str, str(timedelta(seconds=time_left)) if epoch!=0 else None, warmup_lr)
self.logger.log(common_log, mode='warm')
end = time.time()
# single_path init
#network_hardwts, network_index = self.net.get_network_arch_hardwts_with_constraint()
#_, aspp_index = self.net.get_aspp_hardwts_index()
#single_path = self.net.sample_single_path(self.run_manager.run_config.nb_layers, aspp_index, network_index)
set_single_path = True
for i, (datas, targets) in enumerate(data_loader):
#print(i)
#print(self.net.single_path)
#if i == 59: # used for debug
# break
if torch.cuda.is_available():
datas = datas.to(self.run_manager.device, non_blocking=True)
targets = targets.to(self.run_manager.device, non_blocking=True)
else:
raise ValueError('do not support cpu version')
data_time.update(time.time()-end)
# TODO: update one architecture sufficiently
# 1. get hardwts and index
# 2. sample single_path, and set single_path
# 3. get arch_sample_frequency
# 4. update single_path per '{:}'.format(sample_arch_frequency) frequency
#if (i+1) % self.arch_search_config.sample_arch_frequency == 0:
# _, network_index = self.net.get_network_arch_hardwts_with_constraint()
# _, aspp_index = self.net.get_aspp_hardwts_index()
# single_path = self.net.sample_single_path(self.run_manager.run_config.nb_layers, aspp_index, network_index)
if (i+1) % self.arch_search_config.sample_arch_frequency == 0:
set_single_path = True
#logits = self.net.single_path_forward(datas, single_path)
logits = self.net.single_path_forward(datas, set_single_path=set_single_path)
set_single_path = False
# TODO: don't add entropy reg in warmup_phase
ce_loss = self.run_manager.criterion(logits, targets)
#entropy_reg = self.net.calculate_entropy(single_path)
#cell_entropy, network_entropy, _ = self.net.calculate_entropy(single_path)
loss = self.run_manager.add_regularization_loss(epoch, ce_loss, None)
# measure metrics and update
evaluator = Evaluator(self.run_manager.run_config.nb_classes)
evaluator.add_batch(targets, logits)
acc = evaluator.Pixel_Accuracy()
miou = evaluator.Mean_Intersection_over_Union()
fscore = evaluator.Fx_Score()
losses.update(loss.data.item(), datas.size(0))
accs.update(acc, datas.size(0))
mious.update(miou, datas.size(0))
fscores.update(fscore, datas.size(0))
self.net.zero_grad()
loss.backward()
self.run_manager.optimizer.step()
batch_time.update(time.time()-end)
end = time.time()
if (i+1) % self.run_manager.run_config.train_print_freq == 0 or i + 1 == iter_per_epoch:
Wstr = '|*WARM-UP*|' + time_string() + '[{:}][iter{:03d}/{:03d}]'.format(epoch_str, i+1, iter_per_epoch)
Tstr = '|Time | [{batch_time.val:.2f} ({batch_time.avg:.2f}) Data {data_time.val:.2f} ({data_time.avg:.2f})]'.format(batch_time=batch_time, data_time=data_time)
Bstr = '|Base | [Loss {loss.val:.3f} ({loss.avg:.3f}) Accuracy {acc.val:.2f} ({acc.avg:.2f}) MIoU {miou.val:.2f} ({miou.avg:.2f}) F {fscore.val:.2f} ({fscore.avg:.2f})]'\
.format(loss=losses, acc=accs, miou=mious, fscore=fscores)
self.logger.log(Wstr+'\n'+Tstr+'\n'+Bstr, 'warm')
#torch.cuda.empty_cache()
epoch_time.update(time.time() - end_epoch)
end_epoch = time.time()
#epoch_str = '{:03d}/{:03d}'.format(epoch+1, self.run_manager.run_config.warmup_epochs)
log = '[{:}] warm :: loss={:.2f} accuracy={:.2f} miou={:.2f} f1score={:.2f}\n'.format(
epoch_str, losses.average, accs.average, mious.average, fscores.average)
self.vis.visdom_update(epoch, 'warmup_loss', [losses.average])
self.vis.visdom_update(epoch, 'warmup_miou', [mious.average])
self.logger.log(log, mode='warm')
'''
# TODO: wheter perform validation after each epoch in warmup phase ?
valid_loss, valid_acc, valid_miou, valid_fscore = self.validate()
valid_log = 'Warmup Valid\t[{0}/{1}]\tLoss\t{2:.6f}\tAcc\t{3:6.4f}\tMIoU\t{4:6.4f}\tF\t{5:6.4f}'\
.format(epoch+1, warmup_epochs, valid_loss, valid_acc, valid_miou, valid_fscore)
#'\tflops\t{6:}M\tparams{7:}M'\
valid_log += 'Train\t[{0}/{1}]\tLoss\t{2:.6f}\tAcc\t{3:6.4f}\tMIoU\t{4:6.4f}\tFscore\t{5:6.4f}'
self.run_manager.write_log(valid_log, 'valid')
'''
            # continue warmup phase
self.warmup = epoch + 1 < warmup_epochs
self.warmup_epoch = self.warmup_epoch + 1
#self.start_epoch = self.warmup_epoch
# To save checkpoint in warmup phase at specific frequency.
if (epoch+1) % self.run_manager.run_config.save_ckpt_freq == 0 or (epoch+1) == warmup_epochs:
state_dict = self.net.state_dict()
# rm architecture parameters because, in warm_up phase, arch_parameters are not updated.
#for key in list(state_dict.keys()):
# if 'cell_arch_parameters' in key or 'network_arch_parameters' in key or 'aspp_arch_parameters' in key:
# state_dict.pop(key)
checkpoint = {
'state_dict': state_dict,
'weight_optimizer' : self.run_manager.optimizer.state_dict(),
'weight_scheduler': self.run_manager.optimizer.state_dict(),
'warmup': self.warmup,
'warmup_epoch': epoch+1,
}
filename = self.logger.path(mode='warm', is_best=False)
save_path = save_checkpoint(checkpoint, filename, self.logger, mode='warm')
# TODO: save_path used to resume last info
def train(self, fix_net_weights=False):
# have config valid_batch_size, and ignored drop_last.
data_loader = self.run_manager.run_config.train_loader
iter_per_epoch = len(data_loader)
total_iteration = iter_per_epoch * self.run_manager.run_config.epochs
self.update_scheduler = self.arch_search_config.get_update_schedule(iter_per_epoch)
if fix_net_weights: # used to debug
data_loader = [(0, 0)] * iter_per_epoch
print('Train Phase close for debug')
# arch_parameter update frequency and times in each iteration.
#update_schedule = self.arch_search_config.get_update_schedule(iter_per_epoch)
# pay attention here, total_epochs include warmup epochs
epoch_time = AverageMeter()
end_epoch = time.time()
# TODO : use start_epochs
# single_path init
#_, network_index = self.net.get_network_arch_hardwts_with_constraint()
#_, aspp_index = self.net.get_aspp_hardwts_index()
#single_path = self.net.sample_single_path(self.run_manager.run_config.nb_layers, aspp_index, network_index)
for epoch in range(self.start_epoch, self.run_manager.run_config.epochs):
            self.logger.log('\n'+'-'*30+'Train
'batchnorm' ]
# or:
# operations = [ 'relu', 'conv', 'renorm' ]
for operation in operations:
if operation == 'conv':
a = []
for opt_name in [
'param-stddev', 'bias-stddev', 'use-natural-gradient',
'max-change', 'rank-in', 'rank-out', 'num-minibatches-history',
'alpha-in', 'alpha-out', 'num-filters-in', 'num-filters-out',
'height-in','height-out', 'height-subsample-out',
'height-offsets', 'time-offsets', 'required-time-offsets',
'learning-rate-factor', 'l2-regularize' ]:
value = self.config[opt_name]
if value != '':
a.append('{0}={1}'.format(opt_name, value))
conv_opts = ' '.join(a)
configs.append('component name={0}.conv type=TimeHeightConvolutionComponent '
'{1}'.format(name, conv_opts))
configs.append('component-node name={0}.conv component={0}.conv '
'input={1}'.format(name, cur_descriptor))
cur_num_filters = self.config['num-filters-out']
cur_height = self.config['height-out']
elif operation == 'batchnorm':
configs.append('component name={0}.batchnorm type=BatchNormComponent dim={1} '
'block-dim={2} target-rms={3}'.format(
name, cur_num_filters * cur_height, cur_num_filters,
self.config['target-rms']))
configs.append('component-node name={0}.batchnorm component={0}.batchnorm '
'input={1}'.format(name, cur_descriptor))
elif operation == 'renorm':
configs.append('component name={0}.renorm type=NormalizeComponent '
'dim={1} target-rms={2}'.format(
name, cur_num_filters * cur_height,
self.config['target-rms']))
configs.append('component-node name={0}.renorm component={0}.renorm '
'input={1}'.format(name, cur_descriptor))
elif operation == 'relu':
configs.append('component name={0}.relu type=RectifiedLinearComponent '
'dim={1} block-dim={2} self-repair-scale={3} '
'self-repair-lower-threshold={4}'.format(
name, cur_num_filters * cur_height, cur_num_filters,
self.config['self-repair-scale'],
self.config['self-repair-lower-threshold']))
configs.append('component-node name={0}.relu component={0}.relu '
'input={1}'.format(name, cur_descriptor))
elif operation == 'dropout':
configs.append('component name={0}.dropout type=DropoutComponent '
'dim={1} dropout-proportion={2}'.format(
name, cur_num_filters * cur_height,
self.config['dropout-proportion']))
configs.append('component-node name={0}.dropout component={0}.dropout '
'input={1}'.format(name, cur_descriptor))
elif operation == 'so':
configs.append('component name={0}.so type=ScaleAndOffsetComponent '
'dim={1} block-dim={2}'.format(
name, cur_num_filters * cur_height, cur_num_filters))
configs.append('component-node name={0}.so component={0}.so '
'input={1}'.format(name, cur_descriptor))
else:
raise RuntimeError("Un-handled operation type: " + operation)
cur_descriptor = '{0}.{1}'.format(name, operation)
return configs
# This class is for lines like the following:
#
# res-block name=res1 num-filters=64 height=32 time-period=1
#
# It implements a residual block as in ResNets, with pre-activation, and with
# some small differences-- basically, instead of adding the input to the output,
# we put a convolutional layer in there but initialize it to the unit matrix and
# if you want you can give it a relatively small (or even zero) learning rate
# and max-change. And there is batch-norm in that path also.
#
# The number of filters is the same on the input and output; it is actually
# redundant to write it in the config file, because given that we know the
# height, we can work it out from the dimension of the input (as dimension =
# height * num-filters). But we allow it to be specified anyway, for clarity.
#
# Note: the res-block does not support subsampling or changing the number of
# filters. If you want to do that, we recommend that you should do it with a
# single relu-batchnorm-conv-layer.
#
# Here are the most important configuration values, with defaults shown if
# defaults exist:
#
# input='[-1]' Descriptor giving the input of the layer.
# height The input and output height of the image, e.g. 40. Note: the width
# is associated with the time dimension and is dealt with
# implicitly, so it's not specified here.
# num-filters The number of filters on the input and output, e.g. 64.
# It does not have to be specified; if it is not specified,
# we work it out from the input dimension.
# num-bottleneck-filters If specified then this will be a 'bottleneck'
# ResBlock, in which there is a 1x1 convolution from
# num-filters->num-bottleneck-filters, a 3x3 convolution
# from num-bottleneck-filters->num-bottleneck-filters, and
# a 1x1 convolution from num-bottleneck-filters->num-filters.
#
# time-period=1 Think of this as the stride in the time dimension. At the
# input of the network will always have time-period=1; then
# after subsampling once in time we'd have time-period=2; then
# after subsampling again we'd have time-period=4. Because of
# the way nnet3 works, subsampling on the time axis is an
# implicit, not explicit, operation.
# height-period=1 This will almost always be left at the default (1). It is
# analogous to time-period, but because the height, unlike the
# time, is explicitly subsampled, in normal topologies this should
# be left at 1.
#
# bypass-source=noop
# The output of this component is Sum(convolution, x), and
# this option controls what 'x' is. There are 3 options
# here: 'noop', 'input', 'relu' or 'batchnorm'. 'noop' is
# equivalent to 'input' in what it computes; it just
# inserts a 'noop' component in order to make the
# computation more efficient. For both 'noop' and
# 'input', x is the input to this component. If
# bypass-source=relu then we use the relu of the
# input; if 'batchnorm', then we use the relu+batchnorm of
# the input.
# allow-zero-padding=true By default this will allow zero-padding in the time
# dimension, meaning that you don't need extra frames at
# the input to compute the output. There may be ASR
# applications where you want to pad in the time dimension
# with repeats of the first or last frame (as we do for
# TDNNs), where it would be appropriate to write
# allow-zero-padding=false. Note: the way we have
# set it up, it does zero-padding on the height axis
# regardless
#
# Less important config variables:
# self-repair-scale=2.0e-05 This affects the ReLu's. It is a scale on the
# 'self-repair' mechanism that nudges the inputs to the
# ReLUs into the appropriate range in cases where
# the unit is active either too little of the time
# (<10%) or too much of the time (>90%).
# max-change=0.75 Max-parameter-change constant (per minibatch)
# used for convolutional components.
#
#
# The following natural-gradient-related configuration variables are passed in
# to the convolution components, if specified:
# use-natural-gradient (bool)
# rank-in, rank-out (int)
# num-minibatches-history (float)
# alpha-in, alpha-out (float)
# the following is also passed into the convolution components, if specified:
# l2-regularize (float)
#
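# A minimal, illustrative xconfig snippet using this layer (the values are made
# up; only options documented above are used):
#
#   res-block name=res1 num-filters=64 height=32 time-period=1
#   res-block name=res2 num-filters=64 height=32 time-period=1 bypass-source=batchnorm
#
# Both blocks keep the number of filters and the height unchanged; subsampling
# or changing the number of filters has to be done in a separate layer, as
# explained above.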
class XconfigResBlock(XconfigLayerBase):
def __init__(self, first_token, key_to_value, prev_names = None):
assert first_token == 'res-block'
XconfigLayerBase.__init__(self, first_token, key_to_value, prev_names)
def set_default_configs(self):
self.config = {'input':'[-1]',
'height':-1,
'num-filters':-1,
'num-bottleneck-filters':-1,
'time-period':1,
'height-period':1,
'self-repair-scale': 2.0e-05,
'self-repair-lower-threshold1': 0.05,
'self-repair-lower-threshold2': 0.05,
'self-repair-lower-threshold3': 0.05,
'max-change': 0.75,
'allow-zero-padding': True,
'bypass-source' : 'noop',
# the following are not really inspected by this level of
# code, just passed through (but not if left at '').
'param-stddev':'', 'bias-stddev':'',
'use-natural-gradient':'',
'rank-in':'', 'rank-out':'',
'num-minibatches-history':'',
'alpha-in':'', 'alpha-out':'', 'l2-regularize':'' }
def set_derived_configs(self):
# set 'num-filters' or check it..
input_dim = self.descriptors['input']['dim']
height = self.config['height']
cur_num_filters = self.config['num-filters']
if cur_num_filters == -1:
if input_dim % height != 0:
raise RuntimeError("Specified image height {0} does not "
"divide the input dim {1}".format(
height, input_dim))
            self.config['num-filters'] = input_dim // height
elif input_dim != cur_num_filters * height:
raise RuntimeError("Expected the input-dim to equal "
"height={0} * num-filters={1} = {2}, but "
"it is {3}".format(
height, cur_num_filters,
height * cur_num_filters,
input_dim));
def check_configs(self):
# we checked the dimensions in set_derived_configs.
if not self.config['bypass-source'] in [
'input', 'noop', 'relu', 'batchnorm' ]:
raise RuntimeError("Expected direct-convolution-source to "
"be input, relu or batchnorm, got: {1}".format(
self.config['direct-convolution-source']))
def auxiliary_outputs(self):
return []
def output_name(self, auxiliary_output = None):
bypass_source = self.config['bypass-source']
b = self.config['num-bottleneck-filters']
conv = ('{0}.conv2' if b <= 0 else '{0}.conv3').format(self.name)
if bypass_source == 'input':
residual = self.descriptors['input']['final-string']
elif bypass_source == 'noop':
# we let the noop be the sum of the convolutional part and the
# input, so just return the output of the no-op component.
return '{0}.noop'.format(self.name)
elif bypass_source == 'relu':
residual = '{0}.relu1'.format(self.name)
else:
assert bypass_source == 'batchnorm'
residual = '{0}.batchnorm1'.format(self.name)
return 'Sum({0}, {1})'.format(conv, residual)
def output_dim(self, auxiliary_output = None):
assert auxiliary_output is None
input_dim = self.descriptors['input']['dim']
return input_dim
def get_full_config(self):
ans = []
b = self.config['num-bottleneck-filters']
if b <= 0:
config_lines = self._generate_normal_resblock_config()
else:
config_lines = self._generate_bottleneck_resblock_config()
for line in config_lines:
for config_name in ['ref', 'final']:
# we do not support user specified matrices in CNN initialization
# so 'ref' and 'final' configs are the same.
ans.append((config_name, line))
return ans
# _generate_normal_resblock_config is a convenience function to generate the
# res-block config (the non-bottleneck version).
#
# The main path inside the res-block in the non-bottleneck case is as
# follows:
#
# input -> relu1 -> batchnorm1 -> conv1 -> relu2 -> batchnorm2 -> conv2
#
# We put the relu before the batchnorm because we think it makes more sense;
# because the Torch people seemed to find that this works better
# (https://github.com/gcr/torch-residual-networks/issues/5);
# and because in our batchnorm component we haven't implemented the beta and
# gamma; these would be essential to having it work before relu, but
# when before a convolution or linear component, they add no extra modeling
# power.
#
# The output of the res-block is the sum of the last convolutional component and
# the bypass branch chosen by 'bypass-source' (see output_name() above).
edges_original = [(1, 8, {"type": "taxi"}),
(1, 9, {"type": "taxi"}),
(1, 58, {"type": "bus"}),
(1, 46, {"type": "bus"}),
(1, 46, {"type": "metro"}),
(2, 10, {"type": "taxi"}),
(2, 20, {"type": "taxi"}),
(3, 4, {"type": "taxi"}),
(3, 11, {"type": "taxi"}),
(3, 12, {"type": "taxi"}),
(3, 22, {"type": "bus"}),
(3, 23, {"type": "bus"}),
(4, 13, {"type": "taxi"}),
(5, 15, {"type": "taxi"}),
(5, 16, {"type": "taxi"}),
(6, 7, {"type": "taxi"}),
(6, 29, {"type": "taxi"}),
(7, 17, {"type": "taxi"}),
(7, 42, {"type": "bus"}),
(8, 18, {"type": "taxi"}),
(8, 19, {"type": "taxi"}),
(9, 19, {"type": "taxi"}),
(9, 20, {"type": "taxi"}),
(10, 11, {"type": "taxi"}),
(10, 21, {"type": "taxi"}),
(11, 22, {"type": "taxi"}),
(12, 23, {"type": "taxi"}),
(13, 14, {"type": "bus"}),
(13, 23, {"type": "taxi"}),
(13, 23, {"type": "bus"}),
(13, 24, {"type": "taxi"}),
(13, 46, {"type": "metro"}),
(13, 52, {"type": "bus"}),
(13, 67, {"type": "metro"}),
(13, 89, {"type": "metro"}),
(14, 15, {"type": "taxi"}),
(14, 15, {"type": "bus"}),
(14, 25, {"type": "taxi"}),
(14, 52, {"type": "bus"}),
(15, 16, {"type": "taxi"}),
(15, 26, {"type": "taxi"}),
(15, 28, {"type": "taxi"}),
(15, 29, {"type": "bus"}),
(15, 41, {"type": "bus"}),
(16, 28, {"type": "taxi"}),
(16, 29, {"type": "taxi"}),
(17, 29, {"type": "taxi"}),
(17, 30, {"type": "taxi"}),
(18, 31, {"type": "taxi"}),
(18, 43, {"type": "taxi"}),
(19, 32, {"type": "taxi"}),
(20, 33, {"type": "taxi"}),
(21, 33, {"type": "taxi"}),
(22, 23, {"type": "taxi"}),
(22, 23, {"type": "bus"}),
(22, 34, {"type": "taxi"}),
(22, 34, {"type": "bus"}),
(22, 35, {"type": "taxi"}),
(22, 65, {"type": "bus"}),
(23, 37, {"type": "taxi"}),
(23, 67, {"type": "bus"}),
(24, 37, {"type": "taxi"}),
(24, 38, {"type": "taxi"}),
(25, 38, {"type": "taxi"}),
(25, 39, {"type": "taxi"}),
(26, 27, {"type": "taxi"}),
(26, 39, {"type": "taxi"}),
(27, 28, {"type": "taxi"}),
(27, 40, {"type": "taxi"}),
(28, 41, {"type": "taxi"}),
(29, 41, {"type": "taxi"}),
(29, 41, {"type": "bus"}),
(29, 42, {"type": "taxi"}),
(29, 42, {"type": "bus"}),
(29, 55, {"type": "bus"}),
(29, 89, {"type": "metro"}),
(30, 42, {"type": "taxi"}),
(31, 43, {"type": "taxi"}),
(31, 44, {"type": "taxi"}),
(32, 33, {"type": "taxi"}),
(32, 44, {"type": "taxi"}),
(32, 45, {"type": "taxi"}),
(33, 46, {"type": "taxi"}),
(34, 46, {"type": "bus"}),
(34, 47, {"type": "taxi"}),
(34, 48, {"type": "taxi"}),
(34, 63, {"type": "bus"}),
(35, 36, {"type": "taxi"}),
(35, 48, {"type": "taxi"}),
(35, 65, {"type": "taxi"}),
(36, 37, {"type": "taxi"}),
(36, 49, {"type": "taxi"}),
(37, 50, {"type": "taxi"}),
(38, 50, {"type": "taxi"}),
(38, 51, {"type": "taxi"}),
(39, 51, {"type": "taxi"}),
(39, 52, {"type": "taxi"}),
(40, 41, {"type": "taxi"}),
(40, 52, {"type": "taxi"}),
(40, 53, {"type": "taxi"}),
(41, 52, {"type": "bus"}),
(41, 54, {"type": "taxi"}),
(41, 87, {"type": "bus"}),
(42, 56, {"type": "taxi"}),
(42, 72, {"type": "taxi"}),
(42, 72, {"type": "bus"}),
(43, 57, {"type": "taxi"}),
(44, 58, {"type": "taxi"}),
(45, 46, {"type": "taxi"}),
(45, 58, {"type": "taxi"}),
(45, 59, {"type": "taxi"}),
(45, 60, {"type": "taxi"}),
(46, 47, {"type": "taxi"}),
(46, 58, {"type": "bus"}),
(46, 61, {"type": "taxi"}),
(46, 74, {"type": "metro"}),
(46, 78, {"type": "bus"}),
(46, 79, {"type": "metro"}),
(47, 62, {"type": "taxi"}),
(48, 62, {"type": "taxi"}),
(48, 63, {"type": "taxi"}),
(49, 50, {"type": "taxi"}),
(49, 66, {"type": "taxi"}),
(51, 52, {"type": "taxi"}),
(51, 67, {"type": "taxi"}),
(51, 68, {"type": "taxi"}),
(52, 67, {"type": "bus"}),
(52, 69, {"type": "taxi"}),
(52, 86, {"type": "bus"}),
(53, 54, {"type": "taxi"}),
(53, 69, {"type": "taxi"}),
(54, 55, {"type": "taxi"}),
(54, 70, {"type": "taxi"}),
(55, 71, {"type": "taxi"}),
(55, 89, {"type": "bus"}),
(56, 91, {"type": "taxi"}),
(57, 58, {"type": "taxi"}),
(57, 73, {"type": "taxi"}),
(58, 59, {"type": "taxi"}),
(58, 74, {"type": "taxi"}),
(58, 74, {"type": "bus"}),
(58, 75, {"type": "taxi"}),
(58, 77, {"type": "bus"}),
(59, 75, {"type": "taxi"}),
(59, 76, {"type": "taxi"}),
(60, 61, {"type": "taxi"}),
(60, 76, {"type": "taxi"}),
(61, 62, {"type": "taxi"}),
(61, 76, {"type": "taxi"}),
(61, 78, {"type": "taxi"}),
(62, 79, {"type": "taxi"}),
(63, 64, {"type": "taxi"}),
(63, 65, {"type": "bus"}),
(63, 80, {"type": "taxi"}),
(63, 100, {"type": "bus"}),
(64, 65, {"type": "taxi"}),
(64, 81, {"type": "taxi"}),
(65, 66, {"type": "taxi"}),
(65, 67, {"type": "bus"}),
(65, 82, {"type": "taxi"}),
(65, 82, {"type": "bus"}),
(66, 67, {"type": "taxi"}),
(66, 82, {"type": "taxi"}),
(67, 68, {"type": "taxi"}),
(67, 82, {"type": "bus"}),
(67, 84, {"type": "taxi"}),
(67, 79, {"type": "metro"}),
(67, 89, {"type": "metro"}),
(67, 102, {"type": "bus"}),
(67, 111, {"type": "metro"}),
(68, 69, {"type": "taxi"}),
(68, 85, {"type": "taxi"}),
(69, 86, {"type": "taxi"}),
(70, 71, {"type": "taxi"}),
(70, 87, {"type": "taxi"}),
(71, 72, {"type": "taxi"}),
(71, 89, {"type": "taxi"}),
(72, 90, {"type": "taxi"}),
(72, 91, {"type": "taxi"}),
(72, 105, {"type": "bus"}),
(72, 107, {"type": "bus"}),
(73, 74, {"type": "taxi"}),
(73, 92, {"type": "taxi"}),
(74, 75, {"type": "taxi"}),
(74, 92, {"type": "taxi"}),
(74, 94, {"type": "bus"}),
(75, 94, {"type": "taxi"}),
(76, 77, {"type": "taxi"}),
(77, 78, {"type": "taxi"}),
(77, 78, {"type": "bus"}),
(77, 94, {"type": "bus"}),
(77, 95, {"type": "taxi"}),
(77, 96, {"type": "taxi"}),
(77, 124, {"type": "bus"}),
(78, 79, {"type": "taxi"}),
(78, 79, {"type": "bus"}),
(78, 97, {"type": "taxi"}),
(79, 93, {"type": "metro"}),
(79, 98, {"type": "taxi"}),
(79, 111, {"type": "metro"}),
(80, 99, {"type": "taxi"}),
(80, 100, {"type": "taxi"}),
(81, 82, {"type": "taxi"}),
(81, 100, {"type": "taxi"}),
(82, 101, {"type": "taxi"}),
(82, 140, {"type": "bus"}),
(83, 101, {"type": "taxi"}),
(83, 102, {"type": "taxi"}),
(84, 85, {"type": "taxi"}),
(85, 103, {"type": "taxi"}),
(86, 87, {"type": "bus"}),
(86, 102, {"type": "bus"}),
(86, 103, {"type": "taxi"}),
(86, 104, {"type": "taxi"}),
(86, 116, {"type": "bus"}),
(87, 88, {"type": "taxi"}),
(87, 105, {"type": "bus"}),
(88, 89, {"type": "taxi"}),
(88, 117, {"type": "taxi"}),
(89, 105, {"type": "taxi"}),
(89, 105, {"type": "bus"}),
(89, 128, {"type": "metro"}),
(89, 140, {"type": "metro"}),
(90, 91, {"type": "taxi"}),
(90, 105, {"type": "taxi"}),
(91, 105, {"type": "taxi"}),
(91, 107, {"type": "taxi"}),
(92, 93, {"type": "taxi"}),
(93, 94, {"type": "taxi"}),
(93, 94, {"type": "bus"}),
(94, 95, {"type": "taxi"}),
(95, 122, {"type": "taxi"}),
(96, 97, {"type": "taxi"}),
(96, 109, {"type": "taxi"}),
(97, 98, {"type": "taxi"}),
(97, 109, {"type": "taxi"}),
(98, 99, {"type": "taxi"}),
(98, 110, {"type": "taxi"}),
(99, 110, {"type": "taxi"}),
(99, 112, {"type": "taxi"}),
(100, 101, {"type": "taxi"}),
(100, 111, {"type": "bus"}),
(100, 112, {"type": "taxi"}),
(100, 113, {"type": "taxi"}),
(101, 114, {"type": "taxi"}),
(102, 103, {"type": "taxi"}),
(102, 115, {"type": "taxi"}),
(102, 127, {"type": "taxi"}),
(104, 116, {"type": "taxi"}),
(105, 106, {"type": "taxi"}),
(105, 107, {"type": "bus"}),
(105, 108, {"type": "taxi"}),
(105, 108, {"type": "bus"}),
(106, 107, {"type": "taxi"}),
(107, 119, {"type": "taxi"}),
(107, 161, {"type": "bus"}),
(108, 115, {"type": "ferry"}),
(108, 116, {"type": "bus"}),
(108, 117, {"type": "taxi"}),
(108, 119, {"type": "taxi"}),
(108, 135, {"type": "bus"}),
(109, 110, {"type": "taxi"}),
(109, 124, {"type": "taxi"}),
(110, 111, {"type": "taxi"}),
(111, 112, {"type": "taxi"}),
(111, 124, {"type": "taxi"}),
(111, 124, {"type": "bus"}),
(111, 153, {"type": "metro"}),
(111, 163, {"type": "metro"}),
(112, 125, {"type": "taxi"}),
(113, 114, {"type": "taxi"}),
(113, 125, {"type": "taxi"}),
(114, 115, {"type": "taxi"}),
(114, 126, {"type": "taxi"}),
(114, 131, {"type": "taxi"}),
(114, 132, {"type": "taxi"}),
(115, 127, {"type": "taxi"}),
(115, 157, {"type": "ferry"}),
(116, 117, {"type": "taxi"}),
(116, 118, {"type": "taxi"}),
(116, 127, {"type": "taxi"}),
(116, 127, {"type": "bus"}),
(116, 142, {"type": "bus"}),
(117, 129, {"type": "taxi"}),
(118, 129, {"type": "taxi"}),
(118, 134, {"type": "taxi"}),
(118, 142, {"type": "taxi"}),
(119, 136, {"type": "taxi"}),
(120, 121, {"type": "taxi"}),
(120, 144, {"type": "taxi"}),
(121, 122, {"type": "taxi"}),
(121, 145, {"type": "taxi"}),
(122, 123, {"type": "taxi"}),
(122, 123, {"type": "bus"}),
(122, 144, {"type": "bus"}),
(122, 146, {"type": "taxi"}),
(123, 124, {"type": "taxi"}),
(123, 124, {"type": "bus"}),
(123, 137, {"type": "taxi"}),
(123, 144, {"type": "bus"}),
(123, 148, {"type": "taxi"}),
(123, 149, {"type": "taxi"}),
(123, 165, {"type": "bus"}),
(124, 138, {"type": "taxi"}),
(124, 153, {"type": "bus"}),
(125, 131, {"type": "taxi"}),
(126, 127, {"type": "taxi"}),
(126, 140, {"type": "taxi"}),
(127, 133, {"type": "taxi"}),
(127, 133, {"type": "bus"}),
(127, 134, {"type": "taxi"}),
(128, 135, {"type": "bus"}),
(128, 140, {"type": "metro"}),
(128, 142, {"type": "taxi"}),
(128, 142, {"type": "bus"}),
(128, 143, {"type": "taxi"}),
(128, 160, {"type": "taxi"}),
(128, 161, {"type": "bus"}),
(128, 172, {"type": "taxi"}),
                  (128, 185,
"""DGL PyTorch DataLoaders"""
from collections.abc import Mapping, Sequence
from queue import Queue, Empty, Full
import itertools
import threading
from distutils.version import LooseVersion
import math
import inspect
import re
import atexit
import os
import psutil
import torch
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from ..base import NID, EID, dgl_warning, DGLError
from ..batch import batch as batch_graphs
from ..heterograph import DGLHeteroGraph
from ..utils import (
recursive_apply, ExceptionWrapper, recursive_apply_pair, set_num_threads,
context_of, dtype_of)
from ..frame import LazyFeature
from ..storages import wrap_storage
from .base import BlockSampler, as_edge_prediction_sampler
from .. import backend as F
from ..distributed import DistGraph
from ..multiprocessing import call_once_and_share
PYTHON_EXIT_STATUS = False
def _set_python_exit_flag():
global PYTHON_EXIT_STATUS
PYTHON_EXIT_STATUS = True
atexit.register(_set_python_exit_flag)
prefetcher_timeout = int(os.environ.get('DGL_PREFETCHER_TIMEOUT', '30'))
class _TensorizedDatasetIter(object):
def __init__(self, dataset, batch_size, drop_last, mapping_keys):
self.dataset = dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.mapping_keys = mapping_keys
self.index = 0
# For PyTorch Lightning compatibility
def __iter__(self):
return self
def _next_indices(self):
num_items = self.dataset.shape[0]
if self.index >= num_items:
raise StopIteration
end_idx = self.index + self.batch_size
if end_idx > num_items:
if self.drop_last:
raise StopIteration
end_idx = num_items
batch = self.dataset[self.index:end_idx]
self.index += self.batch_size
return batch
def __next__(self):
batch = self._next_indices()
if self.mapping_keys is None:
# clone() fixes #3755, probably. Not sure why. Need to take a look afterwards.
return batch.clone()
# convert the type-ID pairs to dictionary
type_ids = batch[:, 0]
indices = batch[:, 1]
type_ids_sortidx = torch.argsort(type_ids)
type_ids = type_ids[type_ids_sortidx]
indices = indices[type_ids_sortidx]
type_id_uniq, type_id_count = torch.unique_consecutive(type_ids, return_counts=True)
type_id_uniq = type_id_uniq.tolist()
type_id_offset = type_id_count.cumsum(0).tolist()
type_id_offset.insert(0, 0)
id_dict = {
self.mapping_keys[type_id_uniq[i]]:
indices[type_id_offset[i]:type_id_offset[i+1]].clone()
for i in range(len(type_id_uniq))}
return id_dict
def _get_id_tensor_from_mapping(indices, device, keys):
dtype = dtype_of(indices)
id_tensor = torch.empty(
sum(v.shape[0] for v in indices.values()), 2, dtype=dtype, device=device)
offset = 0
for i, k in enumerate(keys):
if k not in indices:
continue
index = indices[k]
length = index.shape[0]
id_tensor[offset:offset+length, 0] = i
id_tensor[offset:offset+length, 1] = index
offset += length
return id_tensor
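# Illustrative example (hypothetical node types): with
#   indices = {'user': torch.tensor([5, 7]), 'item': torch.tensor([2])}
# and keys ['user', 'item'], the resulting id_tensor is
#   [[0, 5],
#    [0, 7],
#    [1, 2]]
# i.e. each row stores (type id, node/edge index).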
def _divide_by_worker(dataset, batch_size, drop_last):
num_samples = dataset.shape[0]
worker_info = torch.utils.data.get_worker_info()
if worker_info:
num_batches = (num_samples + (0 if drop_last else batch_size - 1)) // batch_size
num_batches_per_worker = num_batches // worker_info.num_workers
left_over = num_batches % worker_info.num_workers
start = (num_batches_per_worker * worker_info.id) + min(left_over, worker_info.id)
end = start + num_batches_per_worker + (worker_info.id < left_over)
start *= batch_size
end = min(end * batch_size, num_samples)
dataset = dataset[start:end]
return dataset
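# Worked example (illustrative): with 10 samples, batch_size=4, drop_last=False
# and 2 workers, there are 3 batches in total; worker 0 is assigned two of them
# and receives dataset[0:8], worker 1 is assigned the remaining batch and
# receives dataset[8:10].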
class TensorizedDataset(torch.utils.data.IterableDataset):
"""Custom Dataset wrapper that returns a minibatch as tensors or dicts of tensors.
When the dataset is on the GPU, this significantly reduces the overhead.
"""
def __init__(self, indices, batch_size, drop_last):
if isinstance(indices, Mapping):
self._mapping_keys = list(indices.keys())
self._device = next(iter(indices.values())).device
self._id_tensor = _get_id_tensor_from_mapping(
indices, self._device, self._mapping_keys)
else:
self._id_tensor = indices
self._device = indices.device
self._mapping_keys = None
# Use a shared memory array to permute indices for shuffling. This is to make sure that
# the worker processes can see it when persistent_workers=True, where self._indices
# would not be duplicated every epoch.
self._indices = torch.empty(self._id_tensor.shape[0], dtype=torch.int64).share_memory_()
self._indices[:] = torch.arange(self._id_tensor.shape[0])
self.batch_size = batch_size
self.drop_last = drop_last
def shuffle(self):
"""Shuffle the dataset."""
# TODO: may need an in-place shuffle kernel
self._indices[:] = self._indices[torch.randperm(self._indices.shape[0])]
def __iter__(self):
indices = _divide_by_worker(self._indices, self.batch_size, self.drop_last)
id_tensor = self._id_tensor[indices.to(self._device)]
return _TensorizedDatasetIter(
id_tensor, self.batch_size, self.drop_last, self._mapping_keys)
def __len__(self):
num_samples = self._id_tensor.shape[0]
return (num_samples + (0 if self.drop_last else (self.batch_size - 1))) // self.batch_size
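# Minimal usage sketch (illustrative, not part of the original module):
#
#   ids = torch.arange(1000)
#   ds = TensorizedDataset(ids, batch_size=64, drop_last=False)
#   ds.shuffle()
#   for batch in ds:
#       ...  # each batch is a 1-D tensor of at most 64 IDs
#
# If a dict of per-type ID tensors is passed instead, each batch is a dict
# mapping the original keys to ID tensors.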
class DDPTensorizedDataset(torch.utils.data.IterableDataset):
"""Custom Dataset wrapper that returns a minibatch as tensors or dicts of tensors.
When the dataset is on the GPU, this significantly reduces the overhead.
This class additionally saves the index tensor in shared memory and therefore
avoids duplicating the same index tensor during shuffling.
"""
def __init__(self, indices, batch_size, drop_last, ddp_seed):
if isinstance(indices, Mapping):
self._mapping_keys = list(indices.keys())
len_indices = sum(len(v) for v in indices.values())
else:
self._mapping_keys = None
len_indices = len(indices)
self.rank = dist.get_rank()
self.num_replicas = dist.get_world_size()
self.seed = ddp_seed
self.epoch = 0
self.batch_size = batch_size
self.drop_last = drop_last
if self.drop_last and len_indices % self.num_replicas != 0:
self.num_samples = math.ceil((len_indices - self.num_replicas) / self.num_replicas)
else:
self.num_samples = math.ceil(len_indices / self.num_replicas)
self.total_size = self.num_samples * self.num_replicas
# If drop_last is True, we create a shared memory array larger than the number
# of indices since we will need to pad it after shuffling to make it evenly
# divisible before every epoch. If drop_last is False, we create an array
# with the same size as the indices so we can trim it later.
self.shared_mem_size = self.total_size if not self.drop_last else len_indices
self.num_indices = len_indices
if isinstance(indices, Mapping):
self._device = next(iter(indices.values())).device
self._id_tensor = call_once_and_share(
lambda: _get_id_tensor_from_mapping(indices, self._device, self._mapping_keys),
(self.num_indices, 2), dtype_of(indices))
else:
self._id_tensor = indices
self._device = self._id_tensor.device
self._indices = call_once_and_share(
self._create_shared_indices, (self.shared_mem_size,), torch.int64)
def _create_shared_indices(self):
indices = torch.empty(self.shared_mem_size, dtype=torch.int64)
num_ids = self._id_tensor.shape[0]
indices[:num_ids] = torch.arange(num_ids)
indices[num_ids:] = torch.arange(self.shared_mem_size - num_ids)
return indices
def shuffle(self):
"""Shuffles the dataset."""
# Only rank 0 does the actual shuffling. The other ranks wait for it.
if self.rank == 0:
self._indices[:self.num_indices] = self._indices[
torch.randperm(self.num_indices, device=self._device)]
if not self.drop_last:
# pad extra
self._indices[self.num_indices:] = \
self._indices[:self.total_size - self.num_indices]
dist.barrier()
def __iter__(self):
start = self.num_samples * self.rank
end = self.num_samples * (self.rank + 1)
indices = _divide_by_worker(self._indices[start:end], self.batch_size, self.drop_last)
id_tensor = self._id_tensor[indices.to(self._device)]
return _TensorizedDatasetIter(
id_tensor, self.batch_size, self.drop_last, self._mapping_keys)
def __len__(self):
return (self.num_samples + (0 if self.drop_last else (self.batch_size - 1))) // \
self.batch_size
def _prefetch_update_feats(feats, frames, types, get_storage_func, id_name, device, pin_prefetcher):
for tid, frame in enumerate(frames):
type_ = types[tid]
default_id = frame.get(id_name, None)
for key in frame.keys():
column = frame._columns[key]
if isinstance(column, LazyFeature):
parent_key = column.name or key
if column.id_ is None and default_id is None:
raise DGLError(
'Found a LazyFeature with no ID specified, '
'and the graph does not have dgl.NID or dgl.EID columns')
feats[tid, key] = get_storage_func(parent_key, type_).fetch(
column.id_ or default_id, device, pin_prefetcher)
# This class exists to avoid recursion into the feature dictionary returned by the
# prefetcher when calling recursive_apply().
class _PrefetchedGraphFeatures(object):
__slots__ = ['node_feats', 'edge_feats']
def __init__(self, node_feats, edge_feats):
self.node_feats = node_feats
self.edge_feats = edge_feats
def _prefetch_for_subgraph(subg, dataloader):
node_feats, edge_feats = {}, {}
_prefetch_update_feats(
node_feats, subg._node_frames, subg.ntypes, dataloader.graph.get_node_storage,
NID, dataloader.device, dataloader.pin_prefetcher)
_prefetch_update_feats(
edge_feats, subg._edge_frames, subg.canonical_etypes, dataloader.graph.get_edge_storage,
EID, dataloader.device, dataloader.pin_prefetcher)
return _PrefetchedGraphFeatures(node_feats, edge_feats)
def _prefetch_for(item, dataloader):
if isinstance(item, DGLHeteroGraph):
return _prefetch_for_subgraph(item, dataloader)
elif isinstance(item, LazyFeature):
return dataloader.other_storages[item.name].fetch(
item.id_, dataloader.device, dataloader.pin_prefetcher)
else:
return None
def _await_or_return(x):
if hasattr(x, 'wait'):
return x.wait()
elif isinstance(x, _PrefetchedGraphFeatures):
node_feats = recursive_apply(x.node_feats, _await_or_return)
edge_feats = recursive_apply(x.edge_feats, _await_or_return)
return _PrefetchedGraphFeatures(node_feats, edge_feats)
else:
return x
def _prefetch(batch, dataloader, stream):
# feats has the same nested structure of batch, except that
# (1) each subgraph is replaced with a pair of node features and edge features, both
# being dictionaries whose keys are (type_id, column_name) and values are either
# tensors or futures.
# (2) each LazyFeature object is replaced with a tensor or future.
# (3) everything else are replaced with None.
#
# Once the futures are fetched, this function waits for them to complete by
# calling its wait() method.
with torch.cuda.stream(stream):
feats = recursive_apply(batch, _prefetch_for, dataloader)
feats = recursive_apply(feats, _await_or_return)
return feats
def _assign_for(item, feat):
if isinstance(item, DGLHeteroGraph):
subg = item
for (tid, key), value in feat.node_feats.items():
assert isinstance(subg._node_frames[tid][key], LazyFeature)
subg._node_frames[tid][key] = value
for (tid, key), value in feat.edge_feats.items():
assert isinstance(subg._edge_frames[tid][key], LazyFeature)
subg._edge_frames[tid][key] = value
return subg
elif isinstance(item, LazyFeature):
return feat
else:
return item
def _put_if_event_not_set(queue, result, event):
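# Retry the put with a short timeout so the producer thread can notice `event`
# being set (e.g. on shutdown) instead of blocking forever on a full queue.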
while not event.is_set():
try:
queue.put(result, timeout=1.0)
break
except Full:
continue
def _prefetcher_entry(
dataloader_it, dataloader, queue, num_threads, use_alternate_streams,
done_event):
# PyTorch will set the number of threads to 1 which slows down pin_memory() calls
# in main process if a prefetching thread is created.
if num_threads is not None:
torch.set_num_threads(num_threads)
if use_alternate_streams:
stream = (
torch.cuda.Stream(device=dataloader.device)
if dataloader.device.type == 'cuda' else None)
else:
stream = None
try:
while not done_event.is_set():
try:
batch = next(dataloader_it)
except StopIteration:
break
batch = recursive_apply(batch, restore_parent_storage_columns, dataloader.graph)
feats = _prefetch(batch, dataloader, stream)
_put_if_event_not_set(queue, (
# batch will already be in pinned memory, as per the behavior of
# PyTorch's DataLoader.
recursive_apply(
batch, lambda x: x.to(dataloader.device, non_blocking=True)),
feats,
stream.record_event() if stream is not None else None,
None),
done_event)
_put_if_event_not_set(queue, (None, None, None, None), done_event)
except: # pylint: disable=bare-except
_put_if_event_not_set(
queue, (None, None, None, ExceptionWrapper(where='in prefetcher')), done_event)
# DGLHeteroGraphs have the semantics of lazy feature slicing with subgraphs. Such behavior depends
# on that DGLHeteroGraph's | |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.containeranalysis.v1 ContainerAnalysis API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.devtools.containeranalysis_v1.gapic import (
container_analysis_client_config,
)
from google.cloud.devtools.containeranalysis_v1.gapic.transports import (
container_analysis_grpc_transport,
)
from google.cloud.devtools.containeranalysis_v1.proto import containeranalysis_pb2_grpc
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-containeranalysis"
).version
class ContainerAnalysisClient(object):
"""
Retrieves analysis results of Cloud components such as Docker container
images. The Container Analysis API is an implementation of the
`Grafeas <grafeas.io>`__ API.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis instance
on a resource. An occurrence refers to a ``Note``. A note contains
details describing the analysis and is generally stored in a separate
project, called a ``Provider``. Multiple occurrences can refer to the
same note.
For example, an SSL vulnerability could affect multiple images. In this
case, there would be one note for the vulnerability and an occurrence
for each image with the vulnerability referring to that note.
"""
SERVICE_ADDRESS = "containeranalysis.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.devtools.containeranalysis.v1.ContainerAnalysis"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ContainerAnalysisClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def note_path(cls, project, note):
"""Return a fully-qualified note string."""
return google.api_core.path_template.expand(
"projects/{project}/notes/{note}", project=project, note=note
)
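# Illustrative example (not part of the generated client):
#   ContainerAnalysisClient.note_path('my-project', 'my-note')
#   -> 'projects/my-project/notes/my-note'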
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.ContainerAnalysisGrpcTransport,
Callable[[~.Credentials, type], ~.ContainerAnalysisGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = container_analysis_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=container_analysis_grpc_transport.ContainerAnalysisGrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = container_analysis_grpc_transport.ContainerAnalysisGrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def set_iam_policy(
self,
resource,
policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the access control policy on the specified note or occurrence.
Requires ``containeranalysis.notes.setIamPolicy`` or
``containeranalysis.occurrences.setIamPolicy`` permission if the
resource is a note or an occurrence, respectively.
The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
occurrences.
Example:
>>> from google.cloud.devtools import containeranalysis_v1
>>>
>>> client = containeranalysis_v1.ContainerAnalysisClient()
>>>
>>> resource = client.note_path('[PROJECT]', '[NOTE]')
>>>
>>> # TODO: Initialize `policy`:
>>> policy = {}
>>>
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (str): REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this field.
policy (Union[dict, ~google.cloud.devtools.containeranalysis_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"set_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_iam_policy,
default_retry=self._method_configs["SetIamPolicy"].retry,
default_timeout=self._method_configs["SetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["set_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_iam_policy(
self,
resource,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the access control policy for a note or an occurrence resource.
Requires ``containeranalysis.notes.setIamPolicy`` or
``containeranalysis.occurrences.setIamPolicy`` permission if the
resource is a note or occurrence, respectively.
The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
occurrences.
Example:
>>> from google.cloud.devtools import containeranalysis_v1
>>>
>>> client = containeranalysis_v1.ContainerAnalysisClient()
>>>
>>> resource = client.note_path('[PROJECT]', '[NOTE]')
>>>
>>> response = client.get_iam_policy(resource)
Args:
resource (str): REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this field.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"get_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_iam_policy,
default_retry=self._method_configs["GetIamPolicy"].retry,
default_timeout=self._method_configs["GetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
if metadata is | |
3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
10 15 1 2 -1.</_>
<_>
10 16 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
10 15 2 2 -1.</_>
<_>
10 15 1 1 2.</_>
<_>
11 16 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
10 16 1 2 -1.</_>
<_>
10 17 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
10 17 1 2 -1.</_>
<_>
10 18 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 4 2 2 -1.</_>
<_>
12 4 1 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 4 4 2 -1.</_>
<_>
12 4 2 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 5 4 1 -1.</_>
<_>
12 5 2 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 5 4 8 -1.</_>
<_>
11 5 4 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
11 6 6 8 -1.</_>
<_>
11 6 6 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
11 8 1 6 -1.</_>
<_>
11 8 1 3 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
11 8 6 3 -1.</_>
<_>
14 8 3 3 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 8 4 8 -1.</_>
<_>
11 10 4 4 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 9 2 2 -1.</_>
<_>
11 10 2 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 9 2 3 -1.</_>
<_>
11 10 2 1 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 10 8 2 -1.</_>
<_>
13 12 4 2 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
11 14 3 3 -1.</_>
<_>
12 14 1 3 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 15 1 2 -1.</_>
<_>
11 16 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
11 15 3 1 -1.</_>
<_>
12 15 1 1 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 0 1 3 -1.</_>
<_>
12 1 1 1 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 3 2 10 -1.</_>
<_>
12 3 1 10 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 3 4 5 -1.</_>
<_>
12 3 2 5 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 5 3 1 -1.</_>
<_>
13 6 1 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 5 3 4 -1.</_>
<_>
13 6 1 4 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 5 6 5 -1.</_>
<_>
15 5 3 5 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 5 6 8 -1.</_>
<_>
12 5 6 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 9 1 4 -1.</_>
<_>
12 10 1 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 9 2 3 -1.</_>
<_>
12 10 2 1 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 11 3 1 -1.</_>
<_>
13 12 1 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
12 15 1 2 -1.</_>
<_>
12 16 1 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
12 17 6 3 -1.</_>
<_>
14 17 2 3 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
13 0 2 2 -1.</_>
<_>
13 0 2 1 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 0 7 2 -1.</_>
<_>
13 0 7 1 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 1 4 11 -1.</_>
<_>
13 1 2 11 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 3 6 3 -1.</_>
<_>
15 4 2 1 9.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
13 5 3 1 -1.</_>
<_>
14 6 1 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 5 1 9 -1.</_>
<_>
10 8 1 3 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 5 2 8 -1.</_>
<_>
11 7 2 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 6 6 5 -1.</_>
<_>
15 6 2 5 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
13 6 6 8 -1.</_>
<_>
13 6 6 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 7 6 4 -1.</_>
<_>
15 9 2 4 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 12 1 6 -1.</_>
<_>
13 12 1 3 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 12 6 2 -1.</_>
<_>
15 14 2 2 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
13 18 2 2 -1.</_>
<_>
14 18 1 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 0 6 2 -1.</_>
<_>
14 0 6 1 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 0 6 3 -1.</_>
<_>
13 1 6 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 2 2 10 -1.</_>
<_>
14 2 2 5 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 4 3 2 -1.</_>
<_>
15 5 1 2 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 4 6 16 -1.</_>
<_>
17 4 3 16 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 5 6 14 -1.</_>
<_>
17 5 3 14 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 6 4 8 -1.</_>
<_>
14 6 4 4 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 7 2 7 -1.</_>
<_>
14 7 1 7 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
14 9 6 6 -1.</_>
<_>
14 12 6 3 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 11 6 2 -1.</_>
<_>
16 11 2 2 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 11 6 3 -1.</_>
<_>
16 11 2 3 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
14 16 2 2 -1.</_>
<_>
14 16 1 2 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 0 4 3 -1.</_>
<_>
14 1 4 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 1 5 3 -1.</_>
<_>
14 2 5 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 2 4 6 -1.</_>
<_>
16 2 2 6 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
15 5 3 1 -1.</_>
<_>
16 6 1 1 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 6 4 1 -1.</_>
<_>
16 6 2 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
15 7 2 4 -1.</_>
<_>
15 7 1 2 2.</_>
<_>
16 9 1 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
15 9 4 4 -1.</_>
<_>
16 9 2 4 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
15 10 4 5 -1.</_>
<_>
16 11 2 5 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 14 3 3 -1.</_>
<_>
16 15 1 3 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
15 18 5 2 -1.</_>
<_>
15 19 5 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 0 4 2 -1.</_>
<_>
16 1 4 1 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 0 4 4 -1.</_>
<_>
16 0 4 2 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 4 1 2 -1.</_>
<_>
16 4 1 1 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 4 4 2 -1.</_>
<_>
16 4 2 2 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 4 4 10 -1.</_>
<_>
18 4 2 10 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 5 3 3 -1.</_>
<_>
17 5 1 3 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 5 4 9 -1.</_>
<_>
18 5 2 9 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 6 3 2 -1.</_>
<_>
17 6 1 2 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 6 4 4 -1.</_>
<_>
18 6 2 4 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 6 4 6 -1.</_>
<_>
18 6 2 6 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 6 4 7 -1.</_>
<_>
18 6 2 7 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 8 3 2 -1.</_>
<_>
17 8 1 2 3.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 8 4 7 -1.</_>
<_>
18 8 2 7 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 9 4 5 -1.</_>
<_>
17 10 2 5 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 9 3 6 -1.</_>
<_>
17 10 1 6 3.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 10 4 6 -1.</_>
<_>
17 11 2 6 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 10 4 4 -1.</_>
<_>
18 10 2 4 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 10 4 6 -1.</_>
<_>
16 10 2 6 2.</_></rects>
<tilted>1</tilted></_>
<_>
<rects>
<_>
16 11 4 2 -1.</_>
<_>
17 11 2 2 2.</_></rects>
<tilted>0</tilted></_>
<_>
<rects>
<_>
16 11 4 | |
not False and keypoints is not None:
# self.data['keypoints'] = keypoints
if 'kp_classes' not in locals():
kp_classes = list(range(self.data['keypoints'].shape[1])) # HACK
self.meta['kp_classes'] = kp_classes
self.meta['classes'] = classes
return self
# --- Data Properties ---
@property
def class_probs(self):
return self.data['class_probs']
@property
def offset(self):
return self.data['offset']
@property
def diameter(self):
return self.data['diameter']
# --- Meta Properties ---
@property
def img_dims(self):
return self.meta.get('img_dims', None)
@property
def tf_data_to_img(self):
return self.meta.get('tf_data_to_img', None)
@property
def classes(self):
return self.meta.get('classes', None)
# ---
def numpy(self):
"""
Converts underlying data to numpy arrays
"""
newdata = {}
for key, val in self.data.items():
if val is None:
newval = val
else:
newval = kwarray.ArrayAPI.numpy(val)
newdata[key] = newval
newself = self.__class__(newdata, self.meta)
return newself
def tensor(self, device=ub.NoParam):
"""
Converts underlying data to torch tensors
"""
newdata = {}
for key, val in self.data.items():
if val is None:
newval = val
else:
newval = kwarray.ArrayAPI.tensor(val, device=device)
newdata[key] = newval
newself = self.__class__(newdata, self.meta)
return newself
def _prob_to_dets(probs, diameter=None, offset=None, class_probs=None,
keypoints=None, min_score=0.01, num_min=10,
max_dims=None, min_dims=None):
"""
Directly convert a one-channel probability map into a Detections object.
Helper for Heatmap.detect
It does this by converting each pixel above a threshold in a probability
map to a detection with a specified diameter.
Args:
probs (ArrayLike[H, W]): a one-channel probability map indicating the
likelihood that each particular pixel should be detected as an
object.
diameter (ArrayLike[2, H, W] | Tuple):
H, W sizes for the bounding box at each pixel location.
If passed as a tuple, then all boxes receive that diameter.
offset (Tuple | ArrayLike[2, H, W], default=0):
Y, X offsets from the pixel location to the bounding box center.
If passed as a tuple, then all boxes receive that offset.
class_probs (ArrayLike[C, H, W], optional):
probabilities for each class at each pixel location.
If specified, this will populate the `probs` attribute of the
returned Detections object.
keypoints (ArrayLike[2, K, H, W], optional):
Keypoint predictions for all keypoint classes
min_score (float, default=0.01): probability threshold required
for a pixel to be converted into a detection.
num_min (int, default=10):
always return at least `num_min` of the highest scoring detections
even if they aren't above the `min_score` threshold.
Returns:
kwimage.Detections: raw detections. It is the user's responsibility to
run non-max suppression on these results to remove duplicate
detections.
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> rng = np.random.RandomState(0)
>>> probs = rng.rand(3, 3).astype(np.float32)
>>> min_score = .5
>>> diameter = [10, 10]
>>> dets = _prob_to_dets(probs, diameter, min_score=min_score)
>>> assert dets.boxes.data.dtype.kind == 'f'
>>> assert len(dets) == 9
>>> dets = _prob_to_dets(torch.FloatTensor(probs), diameter, min_score=min_score)
>>> assert dets.boxes.data.dtype.is_floating_point
>>> assert len(dets) == 9
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> import kwimage
>>> from kwimage.structs.heatmap import *
>>> from kwimage.structs.heatmap import _prob_to_dets
>>> heatmap = kwimage.Heatmap.random(rng=0, dims=(3, 3), keypoints=True)
>>> # Try with numpy
>>> min_score = .5
>>> dets = _prob_to_dets(heatmap.class_probs[0], heatmap.diameter,
>>> heatmap.offset, heatmap.class_probs,
>>> heatmap.data['keypoints'],
>>> min_score)
>>> assert dets.boxes.data.dtype.kind == 'f'
>>> assert 'keypoints' in dets.data
>>> dets_np = dets
>>> # Try with torch
>>> heatmap = heatmap.tensor()
>>> dets = _prob_to_dets(heatmap.class_probs[0], heatmap.diameter,
>>> heatmap.offset, heatmap.class_probs,
>>> heatmap.data['keypoints'],
>>> min_score)
>>> assert dets.boxes.data.dtype.is_floating_point
>>> assert len(dets) == len(dets_np)
>>> dets_torch = dets
>>> assert np.all(dets_torch.numpy().boxes.data == dets_np.boxes.data)
Ignore:
import kwil
kwil.autompl()
dets.draw(setlim=True, radius=.1)
Example:
>>> heatmap = Heatmap.random(rng=0, dims=(3, 3), diameter=1)
>>> probs = heatmap.class_probs[0]
>>> diameter = heatmap.diameter
>>> offset = heatmap.offset
>>> class_probs = heatmap.class_probs
>>> min_score = 0.5
>>> dets = _prob_to_dets(probs, diameter, offset, class_probs, None, min_score)
"""
impl = kwarray.ArrayAPI.impl(probs)
if diameter is None:
diameter = 1
if offset is None:
offset = 0
diameter_is_uniform = tuple(getattr(diameter, 'shape', []))[1:] != tuple(probs.shape)
offset_is_uniform = tuple(getattr(offset, 'shape', []))[1:] != tuple(probs.shape)
if diameter_is_uniform:
if hasattr(diameter, 'shape'):
if len(diameter.shape) > 2:
raise Exception('Trailing diameter shape={} does not agree with probs.shape={}'.format(
diameter.shape, probs.shape))
if not ub.iterable(diameter):
diameter = [diameter, diameter]
if offset_is_uniform:
if not ub.iterable(offset):
offset = impl.asarray([offset, offset])
flags = probs > min_score
if not diameter_is_uniform:
if max_dims is not None:
max_dims = max_dims if ub.iterable(max_dims) else (max_dims, max_dims)
max_height, max_width = max_dims
if max_height is not None:
flags &= diameter[0] <= max_height
if max_width is not None:
flags &= diameter[1] <= max_width
if min_dims is not None:
min_dims = min_dims if ub.iterable(min_dims) else (min_dims, min_dims)
min_height, min_width = min_dims
if min_height is not None:
flags &= diameter[0] >= min_height
if min_width is not None:
flags &= diameter[1] >= min_width
# Ensure that some detections are returned even if none are above the
# threshold.
if num_min is not None:
numel = impl.numel(flags)
if flags.sum() < num_min:
if impl.is_tensor:
topxs = probs.view(-1).argsort()[max(0, numel - num_min):numel]
flags.view(-1)[topxs] = 1
else:
idxs = kwarray.argmaxima(probs, num=num_min, ordered=False)
# idxs = probs.argsort(axis=None)[-num_min:]
flags.ravel()[idxs] = True
yc, xc = impl.nonzero(flags)
yc_ = impl.astype(yc, np.float32)
xc_ = impl.astype(xc, np.float32)
if diameter_is_uniform:
h = impl.full_like(yc_, fill_value=diameter[0])
w = impl.full_like(xc_, fill_value=diameter[1])
else:
h = impl.astype(diameter[0][flags], np.float32)
w = impl.astype(diameter[1][flags], np.float32)
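# Stack centers and sizes into (x_center, y_center, width, height) rows so they
# can be wrapped as kwimage Boxes in 'cxywh' format below.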
cxywh = impl.cat([xc_[:, None], yc_[:, None], w[:, None], h[:, None]], axis=1)
import kwimage
ltrb = kwimage.Boxes(cxywh, 'cxywh').toformat('ltrb')
scores = probs[flags]
# TODO:
# Can we extract the detected segmentation mask/poly here as well?
dets = kwimage.Detections(boxes=ltrb, scores=scores)
# Get per-class probs for each detection
if class_probs is not None:
det_probs = impl.T(class_probs[:, yc, xc])
dets.data['probs'] = det_probs
if offset is not None:
if offset_is_uniform:
det_dxdy = offset[[1, 0]]
else:
det_dxdy = impl.T(offset[:, yc, xc][[1, 0]])
dets.boxes.translate(det_dxdy, inplace=True)
if keypoints is not None:
# Take keypoint predictions for each remaining detection
det_kpts_xy = impl.contiguous(impl.T(keypoints[:, :, yc, xc][[1, 0]]))
# Translate keypoints to absolute coordinates
det_kpts_xy[..., 0] += xc_[:, None]
det_kpts_xy[..., 1] += yc_[:, None]
# The shape of det_kpts_xy is [N, K, 2]
# TODO: need to package kp_classes as well
# TODO: can we make this faster? It is bottlenecking, in this instance
# the points list wont be jagged, so perhaps we can use a denser data
# structure?
if 1:
# Try using a dense homogenous data structure
det_coords = kwimage.Coords(det_kpts_xy)
det_kpts = kwimage.Points({'xy': det_coords})
else:
# Using a jagged non-homogenous data structure is slow
det_coords = [
kwimage.Coords(xys) for xys in det_kpts_xy
]
det_kpts = kwimage.PointsList([
kwimage.Points({'xy': xy}) for xy in det_coords
])
dets.data['keypoints'] = det_kpts
assert len(dets.scores.shape) == 1
return dets
def smooth_prob(prob, k=3, inplace=False, eps=1e-9):
"""
Smooths the probability map, but preserves the magnitude of the peaks.
Notes:
Even if ``inplace`` is True, we still need to make a copy of the input
array; however, we ensure that it is cleaned up before we leave the
function scope.
sigma=0.8 @ k=3, sigma=1.1 @ k=5, sigma=1.4 @ k=7
"""
sigma = 0.3 * ((k - 1) * 0.5 - 1) + 0.8 # opencv formula
blur = cv2.GaussianBlur(prob, (k, k), sigma)
# Shift and scale the intensities so the maximum and minimum
# pixel value in the blurred image match the original image
minpos = np.unravel_index(blur.argmin(), blur.shape)
maxpos = np.unravel_index(blur.argmax(), blur.shape)
shift = prob[minpos] - blur[minpos]
scale = prob[maxpos] / np.maximum((blur[maxpos] + shift), eps)
if inplace:
prob[:] = blur
blur = prob
np.add(blur, shift, out=blur)
np.multiply(blur, scale, out=blur)
return blur
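# Illustrative usage (not from the original source); assumes a float32 map:
#   >>> prob = np.random.rand(32, 32).astype(np.float32)
#   >>> smoothed = smooth_prob(prob, k=5)
#   >>> smoothed.shape == prob.shape
#   True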
def _remove_translation(tf):
"""
Removes the translation component of a transform
TODO:
- [ ] Is this possible in more general cases? E.g. projective transforms?
"""
if isinstance(tf, skimage.transform.AffineTransform):
tf_notrans = skimage.transform.AffineTransform(
scale=tf.scale, rotation=tf.rotation, shear=tf.shear)
elif isinstance(tf, skimage.transform.SimilarityTransform):
tf_notrans = skimage.transform.SimilarityTransform(
scale=tf.scale, rotation=tf.rotation)
elif isinstance(tf, skimage.transform.EuclideanTransform):
tf_notrans = skimage.transform.EuclideanTransform(
scale=tf.scale, rotation=tf.rotation)
else:
raise TypeError(tf)
return tf_notrans
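# Illustrative usage (not from the original source):
#   >>> tf_ = skimage.transform.AffineTransform(scale=(2, 2), translation=(5, 7))
#   >>> tuple(_remove_translation(tf_).translation)
#   (0.0, 0.0)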
def _gmean(a, axis=0, clobber=False):
"""
Compute the geometric mean along the specified axis.
Modification of the scipy.mstats method to be more memory efficient
Example:
>>> rng = np.random.RandomState(0)
>>> C, H, W = 8, 32, 32
>>> axis = 0
>>> a = rng.rand(2, C, H, W)
>>> _gmean(a)
"""
assert isinstance(a, np.ndarray)
if clobber:
# NOTE: we reuse (a), we clobber the input array!
log_a = np.log(a, out=a)
else:
log_a = np.log(a)
# attempt to reuse memory | |
try:
from TACT import logger
except ImportError:
pass
import pandas as pd
import sys
import matplotlib.pyplot as plt
plt.ioff() # setting to non-interactive
import numpy as np
import sys
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
class Adjustments:
"""
document parameters
"""
def __init__(self, raw_data="", adjustments_list="", baseResultsLists=""):
self.raw_data = raw_data
self.adjusted_data = pd.DataFrame()
self.results_stats = (
[]
) # make this a dictionary of results with adjustment_list items as keys
def get_regression(self, x, y):
"""
Compute a linear regression of the data. TODO: deprecate this function in favor of get_modelRegression.
"""
df = pd.DataFrame()
df["x"] = x
df["y"] = y
df = df.dropna()
feature_name = "x"
target_name = "y"
data, target = df[[feature_name]], df[target_name]
if len(df) > 1:
x = df["x"].astype(float)
y = df["y"].astype(float)
lm = LinearRegression()
lm.fit(data, target)
predict = lm.predict(data)
result = [lm.coef_[0], lm.intercept_]  # slope (m) and intercept (c)
result.append(lm.score(data, target))  # R^2 score
result.append(abs((x - y).mean()))  # absolute value of the mean difference
mse = mean_squared_error(target, predict, multioutput="raw_values")
rmse = np.sqrt(mse)
result.append(mse[0])
result.append(rmse[0])
else:
# Not enough data to fit a regression; return NaNs
result = [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
# results order: m, c, r2, mean difference, mse, rmse
# logger.debug(result)
return result
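# Illustrative usage (not from the original source):
#   >>> adj = Adjustments()
#   >>> m, c, r2, diff, mse, rmse = adj.get_regression([1, 2, 3, 4], [2.1, 3.9, 6.2, 7.8])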
def post_adjustment_stats(self, inputdata, results, ref_col, TI_col):
if isinstance(inputdata, pd.DataFrame):
fillEmpty = False
if ref_col in inputdata.columns and TI_col in inputdata.columns:
model_adjTI = self.get_regression(inputdata[ref_col], inputdata[TI_col])
name1 = "TI_regression_" + TI_col + "_" + ref_col
results.loc[name1, ["m"]] = model_adjTI[0]
results.loc[name1, ["c"]] = model_adjTI[1]
results.loc[name1, ["rsquared"]] = model_adjTI[2]
results.loc[name1, ["difference"]] = model_adjTI[3]
results.loc[name1, ["mse"]] = model_adjTI[4]
results.loc[name1, ["rmse"]] = model_adjTI[5]
else:
fillEmpty = True
else:
fillEmpty = True
if fillEmpty:
name1 = "TI_regression_" + TI_col + "_" + ref_col
results.loc[name1, ["m"]] = "NaN"
results.loc[name1, ["c"]] = "NaN"
results.loc[name1, ["rsquared"]] = "NaN"
results.loc[name1, ["difference"]] = "NaN"
results.loc[name1, ["mse"]] = "NaN"
results.loc[name1, ["rmse"]] = "NaN"
return results
def perform_SS_S_adjustment(self, inputdata):
"""
Note: Representative TI computed with original RSD_SD
"""
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
inputdata_train = inputdata[inputdata["split"] == True].copy()
inputdata_test = inputdata[inputdata["split"] == False].copy()
if inputdata.empty or len(inputdata) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
inputdata = False
else:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ref_TI"]
full["RSD_TI"] = inputdata_test["RSD_TI"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI"], inputdata_train["Ref_TI"]
)
m = model[0]
c = model[1]
RSD_TI = inputdata_test["RSD_TI"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht1"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht1"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
m = np.NaN
c = np.NaN
else:
# Fit on the Ht1 columns, consistent with the Ht2-Ht4 branches below
model = self.get_regression(
inputdata_train["RSD_TI_Ht1"], inputdata_train["Ane_TI_Ht1"]
)
RSD_TI = inputdata_test["RSD_TI_Ht1"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht1"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht2"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht2"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht2"], inputdata_train["Ane_TI_Ht2"]
)
RSD_TI = inputdata_test["RSD_TI_Ht2"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht2"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht3"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht3"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht3"], inputdata_train["Ane_TI_Ht3"]
)
RSD_TI = inputdata_test["RSD_TI_Ht3"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht3"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
full = pd.DataFrame()
full["Ref_TI"] = inputdata_test["Ane_TI_Ht4"]
full["RSD_TI"] = inputdata_test["RSD_TI_Ht4"]
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(
inputdata_train["RSD_TI_Ht4"], inputdata_train["Ane_TI_Ht4"]
)
RSD_TI = inputdata_test["RSD_TI_Ht4"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht4"] = RSD_TI
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
results["adjustment"] = ["SS-S"] * len(results)
results = results.drop(columns=["sensor", "height"])
return inputdata_test, results, m, c
def perform_SS_SF_adjustment(self, inputdata):
results = pd.DataFrame(
columns=[
"sensor",
"height",
"adjustment",
"m",
"c",
"rsquared",
"difference",
"mse",
"rmse",
]
)
inputdata_train = inputdata[inputdata["split"] == True].copy()
inputdata_test = inputdata[inputdata["split"] == False].copy()
if inputdata.empty or len(inputdata) < 2:
results = self.post_adjustment_stats(
[None], results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
m = np.NaN
c = np.NaN
inputdata = False
else:
filtered_Ref_TI = inputdata_train["Ref_TI"][inputdata_train["RSD_TI"] < 0.3]
filtered_RSD_TI = inputdata_train["RSD_TI"][inputdata_train["RSD_TI"] < 0.3]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None],
results,
"Ref_TI",
"adjTI_RSD_TI",
)
m = np.NaN
c = np.NaN
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
m = model[0]
c = model[1]
RSD_TI = inputdata_test["RSD_TI"].copy()
RSD_TI = (float(model[0]) * RSD_TI) + float(model[1])
inputdata_test["adjTI_RSD_TI"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ref_TI", "adjTI_RSD_TI"
)
if "Ane_TI_Ht1" in inputdata.columns and "RSD_TI_Ht1" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht1"][
inputdata_train["Ane_TI_Ht1"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht1"][
inputdata_train["RSD_TI_Ht1"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht1"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht1"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht1"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht1"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht1", "adjTI_RSD_TI_Ht1"
)
if "Ane_TI_Ht2" in inputdata.columns and "RSD_TI_Ht2" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht2"][
inputdata_train["Ane_TI_Ht2"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht2"][
inputdata_train["RSD_TI_Ht2"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht2"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht2"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht2"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht2"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht2", "adjTI_RSD_TI_Ht2"
)
if "Ane_TI_Ht3" in inputdata.columns and "RSD_TI_Ht3" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht3"][
inputdata_train["Ane_TI_Ht3"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht3"][
inputdata_train["RSD_TI_Ht3"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht3"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht3"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht3"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht3"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht3", "adjTI_RSD_TI_Ht3"
)
if "Ane_TI_Ht4" in inputdata.columns and "RSD_TI_Ht4" in inputdata.columns:
filtered_Ref_TI = inputdata_train["Ane_TI_Ht4"][
inputdata_train["Ane_TI_Ht4"] < 0.3
]
filtered_RSD_TI = inputdata_train["RSD_TI_Ht4"][
inputdata_train["RSD_TI_Ht4"] < 0.3
]
full = pd.DataFrame()
full["filt_Ref_TI"] = filtered_Ref_TI
full["filt_RSD_TI"] = filtered_RSD_TI
full = full.dropna()
if len(full) < 2:
results = self.post_adjustment_stats(
[None], results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
else:
model = self.get_regression(filtered_RSD_TI, filtered_Ref_TI)
RSD_TI = inputdata_test["RSD_TI_Ht4"].copy()
RSD_TI = (model[0] * RSD_TI) + model[1]
inputdata_test["adjTI_RSD_TI_Ht4"] = RSD_TI
inputdata_test["adjRepTI_RSD_RepTI_Ht4"] = (
RSD_TI + 1.28 * inputdata_test["RSD_SD_Ht4"]
)
results = self.post_adjustment_stats(
inputdata_test, results, "Ane_TI_Ht4", "adjTI_RSD_TI_Ht4"
)
results["adjustment"] = ["SS-SF"] * len(results)
results | |
img_inds = np.arange(len(image_paths))
for j in range(num_batches):
batch_inds = img_inds[j * batch_size:(j + 1) * batch_size]
batch_images, _, batch_lms_small = \
load_images_landmarks_maps(
self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,
c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,
save_landmarks=self.compute_nme)
batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})
batch_pred_landmarks = batch_heat_maps_to_landmarks(
batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size/4),
num_landmarks=self.num_landmarks)
if j == 0:
all_pred_landmarks = batch_pred_landmarks.copy()
all_gt_landmarks = batch_lms_small.copy()
else:
all_pred_landmarks = np.concatenate((all_pred_landmarks,batch_pred_landmarks),0)
all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)
reminder = len(image_paths)-num_batches*batch_size
if reminder > 0:
reminder_inds = img_inds[-reminder:]
batch_images, _, batch_lms_small = \
load_images_landmarks_maps(
self.img_menpo_list, reminder_inds, primary=True, image_size=self.image_size,
c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,
save_landmarks=self.compute_nme)
batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})
batch_pred_landmarks = batch_heat_maps_to_landmarks(
batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size/4),
num_landmarks=self.num_landmarks)
all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)
all_gt_landmarks = np.concatenate((all_gt_landmarks, batch_lms_small), 0)
return all_pred_landmarks, all_gt_landmarks
def predict_landmarks_in_batches_loaded(self, images, session):
num_images = int(images.shape[0])
num_batches = int(1.*num_images/self.batch_size)
if num_batches == 0:
batch_size = num_images
num_batches = 1
else:
batch_size = self.batch_size
for j in range(num_batches):
batch_images = images[j * batch_size:(j + 1) * batch_size,:,:,:]
batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})
if self.allocate_once:
batch_heat_maps_to_landmarks_alloc_once(
batch_maps=batch_maps_small_pred,
batch_landmarks=self.valid_landmarks_pred[j * batch_size:(j + 1) * batch_size, :, :],
batch_size=batch_size, image_size=int(self.image_size/4), num_landmarks=self.num_landmarks)
else:
batch_pred_landmarks = batch_heat_maps_to_landmarks(
batch_maps_small_pred, batch_size=batch_size, image_size=int(self.image_size/4),
num_landmarks=self.num_landmarks)
if j == 0:
all_pred_landmarks = batch_pred_landmarks.copy()
else:
all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)
reminder = num_images-num_batches*batch_size
if reminder > 0:
batch_images = images[-reminder:, :, :, :]
batch_maps_small_pred = session.run(self.pred_hm_p, {self.images: batch_images})
if self.allocate_once:
batch_heat_maps_to_landmarks_alloc_once(
batch_maps=batch_maps_small_pred,
batch_landmarks=self.valid_landmarks_pred[-reminder:, :, :],
batch_size=reminder, image_size=int(self.image_size/4), num_landmarks=self.num_landmarks)
else:
batch_pred_landmarks = batch_heat_maps_to_landmarks(
batch_maps_small_pred, batch_size=reminder, image_size=int(self.image_size/4),
num_landmarks=self.num_landmarks)
all_pred_landmarks = np.concatenate((all_pred_landmarks, batch_pred_landmarks), 0)
if not self.allocate_once:
return all_pred_landmarks
def create_summary_ops(self):
self.batch_summary_op = tf.summary.scalar('l_total', self.total_loss)
if self.compute_nme:
l_nme = tf.summary.scalar('l_nme', self.nme_loss)
self.batch_summary_op = tf.summary.merge([self.batch_summary_op, l_nme])
if self.log_histograms:
var_summary = [tf.summary.histogram(var.name, var) for var in tf.trainable_variables()]
grads = tf.gradients(self.total_loss, tf.trainable_variables())
grads = list(zip(grads, tf.trainable_variables()))
grad_summary = [tf.summary.histogram(var.name + '/grads', grad) for grad, var in grads]
activ_summary = [tf.summary.histogram(layer.name, layer) for layer in self.all_layers]
self.batch_summary_op = tf.summary.merge([self.batch_summary_op, var_summary, grad_summary, activ_summary])
if self.augment_texture and self.log_artistic_augmentation_probs:
p_texture_summary = tf.summary.scalar('p_texture', self.p_texture_log)
self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_texture_summary])
if self.augment_geom and self.log_artistic_augmentation_probs:
p_geom_summary = tf.summary.scalar('p_geom', self.p_geom_log)
self.batch_summary_op = tf.summary.merge([self.batch_summary_op, p_geom_summary])
if self.valid_size > 0 and self.compute_nme:
self.valid_summary = tf.summary.scalar('valid_l_nme', self.valid_nme_loss)
if self.sample_to_log:
img_map_summary =tf.summary.image('compare_map_to_gt',self.log_image_map)
if self.sample_per_channel:
map_channels_summary = tf.summary.image('compare_map_channels_to_gt', self.log_map_channels)
self.img_summary = tf.summary.merge([img_map_summary, map_channels_summary])
else:
self.img_summary = img_map_summary
if self.valid_size >= self.sample_grid:
img_map_summary_valid = tf.summary.image('compare_map_to_gt_valid', self.log_image_map)
if self.sample_per_channel:
map_channels_summary_valid = tf.summary.image('compare_map_channels_to_gt_valid', self.log_map_channels)
self.img_summary_valid = tf.summary.merge([img_map_summary_valid, map_channels_summary_valid])
else:
self.img_summary_valid = img_map_summary_valid
def eval(self):
self.add_placeholders()
# build model
self.build_model()
self.create_loss_ops()
if self.debug:
self.img_menpo_list = self.img_menpo_list[:np.min([self.debug_data_size, len(self.img_menpo_list)])]
num_images = len(self.img_menpo_list)
img_inds = np.arange(num_images)
sample_iter = np.ceil(1. * num_images / self.sample_grid).astype('int')
with tf.Session(config=self.config) as sess:
# load trained parameters
print ('loading test model...')
saver = tf.train.Saver()
saver.restore(sess, self.test_model_path)
_, model_name = os.path.split(self.test_model_path)
gt_provided = self.img_menpo_list[0].has_landmarks # check if GT landmarks provided
for i in range(sample_iter):
batch_inds = img_inds[i * self.sample_grid:(i + 1) * self.sample_grid]
if not gt_provided:
batch_images = load_images(self.img_menpo_list, batch_inds, image_size=self.image_size,
c_dim=self.c_dim, scale=self.scale)
batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})
batch_maps_gt = None
else:
# TODO: add option for approx maps + allocate once
batch_images, batch_maps_gt, _ = \
load_images_landmarks_maps(
self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,
c_dim=self.c_dim, num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,
save_landmarks=False)
batch_maps_small_pred = sess.run(self.pred_hm_p, {self.images: batch_images})
sample_path_imgs = os.path.join(
self.save_sample_path, model_name +'-'+ self.test_data+'-sample-%d-to-%d-1.png' % (
i * self.sample_grid, (i + 1) * self.sample_grid))
merged_img = merge_images_landmarks_maps_gt(
batch_images.copy(), batch_maps_small_pred, batch_maps_gt, image_size=self.image_size,
num_landmarks=self.num_landmarks, num_samples=self.sample_grid, scale=self.scale, circle_size=0,
fast=self.fast_img_gen)
scipy.misc.imsave(sample_path_imgs, merged_img)
if self.sample_per_channel:
map_per_channel = map_comapre_channels(
batch_images.copy(), batch_maps_small_pred,batch_maps_gt, image_size=int(self.image_size/4),
num_landmarks=self.num_landmarks, scale=self.scale)
sample_path_channels = os.path.join(
self.save_sample_path, model_name + '-' + self.test_data + '-sample-%d-to-%d-3.png' % (
i * self.sample_grid, (i + 1) * self.sample_grid))
scipy.misc.imsave(sample_path_channels, map_per_channel)
print ('saved %s' % sample_path_imgs)
if self.compute_nme and self.test_data in ['full', 'challenging', 'common', 'training', 'test']:
print ('\n Calculating NME on: ' + self.test_data + '...')
pred_lms, lms_gt = self.predict_landmarks_in_batches(self.img_menpo_list, sess)
nme = sess.run(self.nme_loss, {self.pred_lms_small: pred_lms, self.lms_small: lms_gt})
print ('NME on ' + self.test_data + ': ' + str(nme))
def train(self):
# set random seed
tf.set_random_seed(1234)
np.random.seed(1234)
# build a graph
# add placeholders
self.add_placeholders()
# build model
self.build_model()
# create loss ops
self.create_loss_ops()
# create summary ops
self.create_summary_ops()
# create optimizer and training op
global_step = tf.Variable(0, trainable=False)
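# Staircase schedule: the learning rate is multiplied by `gamma` once every `step` global steps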
lr = tf.train.exponential_decay(self.learning_rate,global_step, self.step, self.gamma, staircase=True)
if self.adam_optimizer:
optimizer = tf.train.AdamOptimizer(lr)
else:
optimizer = tf.train.MomentumOptimizer(lr, self.momentum)
train_op = optimizer.minimize(self.total_loss,global_step=global_step)
# TODO: remove
if self.approx_maps_gpu: # create heat-maps using tf convolution. use only with GPU support!
self.build_hm_generator()
with tf.Session(config=self.config) as sess:
tf.global_variables_initializer().run()
# load pre trained weights if load_pretrain==True
if self.load_pretrain:
print('')
print('*** loading pre-trained weights from: '+self.pre_train_path+' ***')
loader = tf.train.Saver()
loader.restore(sess, self.pre_train_path)
print("*** Model restore finished, current global step: %d" % global_step.eval())
# for fine-tuning, choose reset_training_op==True. when resuming training, reset_training_op==False
if self.reset_training_op:
print ("resetting optimizer and global step")
opt_var_list = [optimizer.get_slot(var, name) for name in optimizer.get_slot_names()
for var in tf.global_variables() if optimizer.get_slot(var, name) is not None]
opt_var_list_init = tf.variables_initializer(opt_var_list)
opt_var_list_init.run()
sess.run(global_step.initializer)
# create model saver and file writer
summary_writer = tf.summary.FileWriter(logdir=self.save_log_path, graph=tf.get_default_graph())
saver = tf.train.Saver()
print('')
print('*** Start Training ***')
# initialize some variables before training loop
resume_step = global_step.eval()
num_train_images = len(self.img_menpo_list)
batches_in_epoch = int(float(num_train_images) / float(self.batch_size))
epoch = int(resume_step / batches_in_epoch)
img_inds = self.epoch_inds_shuffle[epoch, :]
p_texture = self.p_texture
p_geom = self.p_geom
artistic_reload = False
basic_reload = True
log_valid = True
log_valid_images = True
if self.allocate_once:
batch_images = np.zeros([self.batch_size, self.image_size, self.image_size, self.c_dim]).astype('float32')
batch_lms_small = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')
batch_lms_small_pred = np.zeros([self.batch_size, self.num_landmarks, 2]).astype('float32')
if self.approx_maps_gpu:
batch_hm_base_small = np.zeros((self.batch_size * self.num_landmarks,
int(self.image_size/4), int(self.image_size/4), 1)).astype('float32')
else:
batch_maps_small = np.zeros((self.batch_size, int(self.image_size/4),
int(self.image_size/4), self.num_landmarks)).astype('float32')
if self.approx_maps_cpu:
gaussian_filt = create_gaussian_filter(sigma=self.sigma, win_mult=self.win_mult)
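# main training loop: each iteration picks the next shuffled batch of the current epoch, refreshes
# the image list when augmentation settings change, builds the ground-truth heatmaps (GPU conv,
# CPU approximation, or exact per-landmark maps) and runs a single optimization step.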
for step in range(resume_step, self.train_iter):
j = step % batches_in_epoch # j==0 if we finished an epoch
if step > resume_step and j == 0: # if we finished an epoch and this isn't the first step
epoch += 1
img_inds = self.epoch_inds_shuffle[epoch, :] # get next shuffled image inds
artistic_reload = True
log_valid = True
log_valid_images = True
if self.use_epoch_data:
epoch_dir = os.path.join(self.epoch_data_dir, str(epoch))
self.img_menpo_list = load_menpo_image_list(
self.img_path, train_crop_dir=epoch_dir, img_dir_ns=None, mode=self.mode,
bb_dictionary=self.bb_dictionary, image_size=self.image_size, test_data=self.test_data,
augment_basic=False, augment_texture=False, augment_geom=False)
# add basic augmentation (if basic_start > 0 and augment_basic is True)
if basic_reload and (epoch >= self.basic_start) and self.basic_start > 0 and self.augment_basic:
basic_reload = False
self.img_menpo_list = reload_menpo_image_list(
self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,
image_size=self.image_size, augment_basic=self.augment_basic,
augment_texture=(self.augment_texture and epoch >= self.artistic_start), p_texture=p_texture,
augment_geom=(self.augment_geom and epoch >= self.artistic_start), p_geom=p_geom)
print ("****** adding basic augmentation ******")
# increase artistic augmentation probability
if ((epoch % self.artistic_step == 0 and epoch >= self.artistic_start and self.artistic_step != -1)
or (epoch == self.artistic_start)) and (self.augment_geom or self.augment_texture)\
and artistic_reload:
artistic_reload = False
if epoch == self.artistic_start:
print ("****** adding artistic augmentation ******")
print ("****** augment_geom: " + str(self.augment_geom) + ", p_geom: " + str(p_geom) + " ******")
print ("****** augment_texture: " + str(self.augment_texture) + ", p_texture: " +
str(p_texture) + " ******")
if epoch % self.artistic_step == 0 and self.artistic_step != -1:
print ("****** increasing artistic augmentation probability ******")
p_geom = 1.- 0.95 ** (epoch/self.artistic_step)
p_texture = 1. - 0.95 ** (epoch/self.artistic_step)
print ("****** augment_geom: " + str(self.augment_geom) + ", p_geom: " + str(p_geom) + " ******")
print ("****** augment_texture: " + str(self.augment_texture) + ", p_texture: " +
str(p_texture) + " ******")
self.img_menpo_list = reload_menpo_image_list(
self.img_path, self.train_crop_dir, self.img_dir_ns, self.mode, self.train_inds,
image_size=self.image_size, augment_basic=(self.augment_basic and epoch >= self.basic_start),
augment_texture=self.augment_texture, p_texture=p_texture,
augment_geom=self.augment_geom, p_geom=p_geom)
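# example of the schedule above: with artistic_step=10, at epoch 30 p = 1 - 0.95**3 ~= 0.14, so
# p_texture and p_geom grow slowly towards 1 as training progresses.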
# get batch images
batch_inds = img_inds[j * self.batch_size:(j + 1) * self.batch_size]
if self.approx_maps_gpu: # TODO: remove
if self.allocate_once:
load_images_landmarks_alloc_once(
self.img_menpo_list, batch_inds, images=batch_images, landmarks_small=batch_lms_small,
landmarks=None, primary=True, image_size=self.image_size, scale=self.scale)
create_heat_maps_base_alloc_once(
landmarks_small=batch_lms_small.astype(int), landmarks=None,
hm_small=batch_hm_base_small, hm_large=None, primary=True, num_images=self.batch_size,
num_landmarks=self.num_landmarks)
else:
batch_images, batch_lms_small = load_images_landmarks(
self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,
num_landmarks=self.num_landmarks, scale=self.scale)
batch_hm_base_small = create_heat_maps_base(
landmarks_small=batch_lms_small.astype(int), landmarks=None, primary=True,
num_images=self.batch_size, image_size=self.image_size, num_landmarks=self.num_landmarks)
batch_maps_small = sess.run(self.filt_hm_small, {self.sparse_hm_small: batch_hm_base_small})
elif self.approx_maps_cpu:
if self.allocate_once:
load_images_landmarks_approx_maps_alloc_once(
self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,
maps=None, landmarks=batch_lms_small, primary=True, image_size=self.image_size,
num_landmarks=self.num_landmarks, scale=self.scale, gauss_filt_small=gaussian_filt,
win_mult=self.win_mult, sigma=self.sigma, save_landmarks=self.compute_nme)
else:
batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_approx_maps(
self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size,
num_landmarks=self.num_landmarks, c_dim=self.c_dim, scale=self.scale,
gauss_filt_small=gaussian_filt, win_mult=self.win_mult, sigma=self.sigma,
save_landmarks=self.compute_nme)
else:
if self.allocate_once:
load_images_landmarks_maps_alloc_once(
self.img_menpo_list, batch_inds, images=batch_images, maps_small=batch_maps_small,
landmarks=batch_lms_small, maps=None, primary=True, image_size=self.image_size,
num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,
save_landmarks=self.compute_nme)
else:
batch_images, batch_maps_small, batch_lms_small = load_images_landmarks_maps(
self.img_menpo_list, batch_inds, primary=True, image_size=self.image_size, c_dim=self.c_dim,
num_landmarks=self.num_landmarks, scale=self.scale, sigma=self.sigma,
save_landmarks=self.compute_nme)
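# at this point batch_images and batch_maps_small (plus batch_lms_small when compute_nme is set)
# hold the current mini-batch, produced by one of the three branches above: TF-convolution heatmaps
# (approx_maps_gpu), CPU-approximated heatmaps (approx_maps_cpu), or exact gaussian maps.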
feed_dict_train = {self.images: batch_images, self.heatmaps_small: batch_maps_small}
sess.run(train_op, feed_dict_train)
<filename>commands/serverstats.py
import discord
from discord.ext import commands
from mojang import MojangAPI
from utils.utils import hypixel, utils
from utils.embeds import Embeds
import random
import datetime
import time as thyme
import mystbin
import re
mystbin_client = mystbin.MystbinClient()
class ServerStats(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=['lb'])
async def leaderboard(self, ctx, game: str=None, *, typevar: str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
if game is None:
embed = discord.Embed(title="Error", description="""Please provide a game.""", color=0xff0000)
await ctx.send(embed=embed)
return
if typevar is None:
embed = discord.Embed(title="Error", description="""Please provide a leaderboard.""", color=0xff0000)
await ctx.send(embed=embed)
return
if not typevar.lower().startswith(('overall', 'monthly', 'weekly', 'daily')):
t = "Overall " + typevar
typevar = t
if typevar.lower() == 'overall level':
typevar = "Current Level"
#send request
data = await hypixel.leaderboards()
#errors
if data['success'] == False:
embed = discord.Embed(title="Error", description="""Something went wrong.""", color=0xff0000)
await ctx.send(embed=embed)
return
#it worked!
elif data['success'] == True:
game = game.upper()
typevar = typevar.lower()
leaders = None
title = None
for lb in data['leaderboards']:
if lb == game.upper():
for reekid in data['leaderboards'][lb]:
titl = reekid['prefix'] + " " + reekid['title']
if titl.lower() == typevar:
title = reekid['prefix'] + " " + reekid['title']
leaders = reekid['leaders']
break
if leaders is None:
embed = discord.Embed(title='Error', description='Invalid leaderboard.', color=0xff0000)
await ctx.send(embed=embed)
return
msg = ''
num = 0
async with ctx.channel.typing():
for uid in leaders:
uid = uid.replace('-','')
name = await hypixel.getname(uid)
if name is None:
name = 'N/A'
num += 1
msg += f"{num}: {name}\n"
color=random.randint(1, 16777215)
embed = discord.Embed(title=f'{game.lower().capitalize()}: {title} leaderboard', description=msg, color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
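# example (the 'h!' prefix is assumed from the boosters help text below): "h!lb SKYWARS wins" is
# normalized to the "Overall wins" leaderboard, the leader UUIDs are resolved to names and the
# ranking is posted as a single embed.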
@commands.command(aliases=['players','count', 'pc'])
async def playercount(self, ctx):
perms = None
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
if not perms.add_reactions:
embed=discord.Embed(title="Error", description="Cannot add reactions in this channel. Please contact a server administrator to fix this issue.", color=0xff0000)
await ctx.send(embed=embed)
return
if perms.add_reactions:
pass
if not perms.send_messages:
return
data = await hypixel.counts()
if data['success'] == True:
embeds, paginator = await Embeds().PlayerCount().generate(ctx, data, perms)
await paginator.run(embeds)
else:
embed = discord.Embed(title="Error", description="""Couldn't retrieve Hypixel player counts. Please try again later.""", color=0xff0000)
await ctx.send(embed=embed)
return
@commands.command(aliases=['g'])
async def guild(self, ctx, *, guildname:str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
if guildname is None:
embed = discord.Embed(title="Error", description='Please provide a guild to search for.', color=0xff0000)
await ctx.send(embed=embed)
return
gnamesearch = guildname.replace(' ','%20')
try:
data = await hypixel.guild(gnamesearch)
except ValueError:
embed = discord.Embed(title="Error", description="""The guild """ + guildname + ' does not exist.', color=0xff0000)
await ctx.send(embed=embed)
return
try:
glevel = utils.guildlevel(xp=data['guild']['exp'])
except:
glevel = 'N/A'
try:
gname = data['guild']['name']
except:
gname = 'N/A'
try:
time = datetime.datetime.fromtimestamp(data['guild']['created']/1000.0)
date = time.strftime("%m/%d/%Y")
minute = time.strftime("%M")
if int(time.strftime('%H')) == 12:
ampm = 'PM'
hour = time.strftime('%H')
elif int(time.strftime('%H')) > 12:
hour = int(time.strftime('%H')) - 12
ampm = 'PM'
elif int(time.strftime('%H')) < 12:
ampm = 'AM'
hour = time.strftime('%H')
else: #this should never happen
hour = None
ampm = None
created = str(date) + ' at ' + str(hour) + ':' + str(minute) + ' ' + ampm + ', EST'
except:
created = 'N/A'
try:
desc = data['guild']['description']
except:
desc = 'N/A'
try:
tag = data['guild']['tag']
except:
tag = 'N/A'
try:
mbrs = len(data['guild']['members'])
except:
mbrs = 'N/A'
try:
gmuuid = data['guild']['members'][0]['uuid']
gm = await hypixel.getname(gmuuid)
if gm is None:
gm = 'N/A'
except:
gm = 'N/A'
color=random.randint(1, 16777215)
embed = discord.Embed(title='Guild Info', color=color)
embed.add_field(name="Guild Name", value=str(gname), inline=True)
embed.add_field(name="Guild Manager", value=str(gm), inline=True)
embed.add_field(name="Members", value=str(utils.comma(mbrs)), inline=True)
embed.add_field(name="Created On", value=str(created), inline=True)
embed.add_field(name="Guild Level", value=str(utils.comma(glevel)), inline=True)
embed.add_field(name="Guild Description", value=str(desc), inline=True)
embed.add_field(name="Guild Tag", value=str(tag), inline=True)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command(aliases=['wd'])
async def watchdog(self, ctx):
perms = None
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
data = await hypixel.watchdog()
if data['success'] == True:
try:
wdtotal = data['watchdog_total']
except:
wdtotal = 'N/A'
try:
stafftotal = data['staff_total']
except:
stafftotal = 'N/A'
color=random.randint(1, 16777215)
embed = discord.Embed(title="Hypixel Watchdog Statistics", color = color)
embed.add_field(name="Watchdog Bans", value=str(utils.comma(wdtotal)))
embed.add_field(name="Staff Bans", value=str(utils.comma(stafftotal)))
if isinstance(wdtotal, int) and isinstance(stafftotal, int):
embed.add_field(name="Total Bans", value=str(utils.comma(wdtotal+stafftotal)))
else:
embed.add_field(name="Total Bans", value='N/A')
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(embed=embed)
@commands.command(aliases=['boosts'])
@commands.cooldown(1, 3600, commands.BucketType.user)
async def boosters(self, ctx, *, game:str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
color=random.randint(1, 16777215)
if game is None:
try:
number = await hypixel.boosters()
except ValueError:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
embed = discord.Embed(title="Hypixel Boosters", description=f"There are {utils.comma(number)} boosters on the Hypixel Network.\nFor boosters for a specific gamemode, please run `h!boosters <game>`.", color=color)
await ctx.send(embed=embed)
return
try:
data = await hypixel.boosters('id')
except ValueError:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
_game = game.replace(' ','_')
try:
id = utils.gameidconverter(_game)
except ValueError:
embed = discord.Embed(title="Error", description=f"Invalid game.", color=0xff0000)
await ctx.send(embed=embed)
return
msg1 = ''
amnt = 0
embed = discord.Embed(title="Hypixel Boosters", description=f"Collecting data, please wait.\nThis message will be edited once data is ready.", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
message = await ctx.send(embed=embed)
for booster in data:
if booster['gameType'] == id:
user = await hypixel.getname(booster['purchaserUuid'])
msg1 += f"{user} - ID: {booster['_id']}\n"
amnt += 1
if msg1 == '':
msg1 = f"There are currently 0 {game.lower().capitalize()} boosters active."
paste = await mystbin_client.post(msg1.replace('\\', ''), syntax="text")
url = str(paste)
embed = discord.Embed(title="Hypixel Boosters", description=f"There are {amnt} {game.lower().capitalize()} boosters on the network.\nBoosters have been uploaded to {url}.", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await message.edit(embed=embed)
embed = discord.Embed(title=f"Your data is ready!", description=f"I have collected all of the {game.lower().capitalize()} boosters.\n[Jump to message]({message.jump_url})", color=color)
embed.set_footer(text='Unofficial Hypixel Discord Bot')
await ctx.send(f"{ctx.author.mention}", embed=embed)
self.boosters.reset_cooldown(ctx)
@commands.command(aliases=['boost'])
async def booster(self, ctx, booster: str=None):
if ctx.guild is not None:
me = ctx.guild.get_member(self.bot.user.id)
perms = ctx.channel.permissions_for(me)
if perms.send_messages:
if not perms.embed_links:
await ctx.send("Error: Cannot send embeds in this channel. Please contact a server administrator to fix this issue.")
return
if perms.embed_links:
pass
if not perms.send_messages:
return
color=random.randint(1, 16777215)
current = int(thyme.time())
if booster is None:
embed = discord.Embed(title="Error", description="""Please provide a booster ID.""", color=0xff0000)
await ctx.send(embed=embed)
return
try:
data = await hypixel.boosters('all')
except discord.NotFound:
embed = discord.Embed(title="Hypixel Boosters", description=f"There are currently 0 boosters active on the network.", color=color)
await ctx.send(embed=embed)
return
info = ''
for i in data:
if i['_id'] == booster:
info = i
break
if info == '':
embed = discord.Embed(title="Hypixel Boosters", description=f"Invalid booster.", color=0xff0000)
await ctx.send(embed=embed)
return
exp = 'N/A'
passed = current-(info['dateActivated']/1000)
remaining = info['length']-passed
seconds = int(float(remaining))
min, sec = divmod(seconds, 60)
hour, min = divmod(min, 60)
if hour == 0:
if min == 0:
exp = f'{sec} seconds'
elif min > 0:
if len(str(min)) == 1:
if len(str(sec)) == 1:
exp = f"0:0{min}:0{sec}"
else:
exp = f"0:0{min}:{sec}"
else:
if len(str(sec)) == 1:
exp = f"0:{min}:0{sec}"
else:
exp = f"0:{min}:{sec}"
elif hour > 0:
if len(str(hour)) == 1:
if len(str(min)) == 1:
exp = f"0{hour}:0{min}"
else:
exp = f"0{hour}:{min}"
else:
exp = f"{hour}:{min}"
try:
user = await hypixel.getname(info['purchaserUuid'])
if user is None:
embed = discord.Embed(title="Error", description="""Something went wrong. Please try again later.""", color=0xff0000)
await ctx.send(embed=embed)
return
except:
user = 'N/A'
try:
uuid = info['purchaserUuid']
except:
#_ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# <NAME>
# Copyright 2018 Keeper Security Inc.
# Contact: <EMAIL>
#
import re
import os
import base64
import argparse
import logging
import datetime
import getpass
from typing import Optional, List
import requests
import tempfile
import json
from urllib.parse import urlsplit
from tabulate import tabulate
from Cryptodome.Cipher import AES
from ..params import KeeperParams, LAST_RECORD_UID, LAST_FOLDER_UID, LAST_SHARED_FOLDER_UID
from ..record import Record
from .. import api
from .base import raise_parse_exception, suppress_exit, user_choice, Command
from ..subfolder import try_resolve_path, find_folders, get_folder_path
def register_commands(commands):
commands['sync-down'] = SyncDownCommand()
commands['delete-all'] = RecordDeleteAllCommand()
commands['whoami'] = WhoamiCommand()
commands['login'] = LoginCommand()
commands['logout'] = LogoutCommand()
commands['check-enforcements'] = CheckEnforcementsCommand()
commands['connect'] = ConnectCommand()
commands['echo'] = EchoCommand()
commands['set'] = SetCommand()
def register_command_info(aliases, command_info):
aliases['d'] = 'sync-down'
aliases['delete_all'] = 'delete-all'
for p in [whoami_parser, login_parser, logout_parser, echo_parser, set_parser]:
command_info[p.prog] = p.description
command_info['sync-down|d'] = 'Download & decrypt data'
whoami_parser = argparse.ArgumentParser(prog='whoami', description='Information about logged in user')
whoami_parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='verbose output')
whoami_parser.error = raise_parse_exception
whoami_parser.exit = suppress_exit
login_parser = argparse.ArgumentParser(prog='login', description='Login to Keeper')
login_parser.add_argument('-p', '--password', dest='password', action='store', help='<PASSWORD>')
login_parser.add_argument('email', nargs='?', type=str, help='account email')
login_parser.error = raise_parse_exception
login_parser.exit = suppress_exit
logout_parser = argparse.ArgumentParser(prog='logout', description='Logout from Keeper')
logout_parser.error = raise_parse_exception
logout_parser.exit = suppress_exit
check_enforcements_parser = argparse.ArgumentParser(prog='check-enforcements', description='Check enterprise enforcements')
check_enforcements_parser.error = raise_parse_exception
check_enforcements_parser.exit = suppress_exit
connect_parser = argparse.ArgumentParser(prog='connect', description='Establishes connection to external server')
connect_parser.add_argument('--syntax-help', dest='syntax_help', action='store_true', help='display help on command format and template parameters')
connect_parser.add_argument('-n', '--new', dest='new_data', action='store_true', help='request per-user data')
connect_parser.add_argument('-s', '--sort', dest='sort_by', action='store', choices=['endpoint', 'title', 'folder'], help='sort output')
connect_parser.add_argument('-f', '--filter', dest='filter_by', action='store', help='filter output')
connect_parser.add_argument('endpoint', nargs='?', action='store', type=str, help='endpoint')
connect_parser.error = raise_parse_exception
connect_parser.exit = suppress_exit
echo_parser = argparse.ArgumentParser(prog='echo', description='Displays argument to output')
echo_parser.add_argument('argument', nargs='?', action='store', type=str, help='argument')
echo_parser.error = raise_parse_exception
echo_parser.exit = suppress_exit
set_parser = argparse.ArgumentParser(prog='set', description='Set environment variable')
set_parser.add_argument('name', action='store', type=str, help='name')
set_parser.add_argument('value', action='store', type=str, help='value')
set_parser.error = raise_parse_exception
set_parser.exit = suppress_exit
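# note: every parser above overrides error/exit with raise_parse_exception/suppress_exit so that a
# bad argument raises instead of argparse calling sys.exit() (presumably to keep the interactive
# session alive).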
class SyncDownCommand(Command):
def execute(self, params, **kwargs):
api.sync_down(params)
accepted = False
if len(params.pending_share_requests) > 0:
for user in params.pending_share_requests:
accepted = False
print('Note: You have a pending share request from ' + user)
answer = user_choice('Do you want to accept this request?', 'yn', 'n')
rq = {
'command': 'accept_share' if answer == 'y' else 'cancel_share',
'from_email': user
}
try:
rs = api.communicate(params, rq)
if rs['result'] == 'success':
accepted = accepted or answer == 'y'
except:
pass
params.pending_share_requests.clear()
if accepted:
params.sync_data = True
class RecordDeleteAllCommand(Command):
def execute(self, params, **kwargs):
uc = user_choice('Are you sure you want to delete all Keeper records on the server?', 'yn', default='n')
if uc.lower() == 'y':
api.sync_down(params)
if len(params.record_cache) == 0:
logging.warning('No records to delete')
return
request = {
'command': 'record_update',
'delete_records': [key for key in params.record_cache.keys()]
}
logging.info('removing %s records from Keeper', len(params.record_cache))
response_json = api.communicate(params, request)
success = [info for info in response_json['delete_records'] if info['status'] == 'success']
if len(success) > 0:
logging.info("%s records deleted successfully", len(success))
failures = [info for info in response_json['delete_records'] if info['status'] != 'success']
if len(failures) > 0:
logging.warning("%s records failed to delete", len(failures))
params.revision = 0
params.sync_data = True
class WhoamiCommand(Command):
def get_parser(self):
return whoami_parser
def execute(self, params, **kwargs):
is_verbose = kwargs.get('verbose') or False
if is_verbose:
if params.server:
parts = urlsplit(params.server)
host = parts[1]
cp = host.rfind(':')
if cp > 0:
host = host[:cp]
data_center = 'EU' if host.endswith('.eu') else 'US'
print('{0:>20s}: {1}'.format('Data Center', data_center))
environment = ''
if host.startswith('dev.'):
environment = 'DEV'
elif host.startswith('qa.'):
environment = 'QA'
if environment:
print('{0:>20s}: {1}'.format('Environment', environment))
print('')
if params.session_token:
print('{0:>20s}: {1:<20s}'.format('Logged in as', params.user))
if params.license:
print('')
account_type = params.license['account_type']
account_type_name = 'Enterprise' if account_type == 2 else 'Family Plan' if account_type == 1 else params.license['product_type_name']
print('{0:>20s} {1:>20s}: {2}'.format('Account', 'Type', account_type_name))
print('{0:>20s} {1:>20s}: {2}'.format('', 'Renewal Date', params.license['expiration_date']))
if 'bytes_total' in params.license:
storage_bytes = params.license['bytes_total']
storage_gb = storage_bytes >> 30
print('{0:>20s} {1:>20s}: {2}GB'.format('Storage', 'Capacity', storage_gb))
storage_usage = params.license['bytes_used'] * 100 // storage_bytes
print('{0:>20s} {1:>20s}: {2}%'.format('', 'Usage', storage_usage))
print('{0:>20s} {1:>20s}: {2}'.format('', 'Renewal Date', params.license['storage_expiration_date']))
if is_verbose:
print('')
print('{0:>20s}: {1}'.format('Records', len(params.record_cache)))
sf_count = len(params.shared_folder_cache)
if sf_count > 0:
print('{0:>20s}: {1}'.format('Shared Folders', sf_count))
team_count = len(params.team_cache)
if team_count > 0:
print('{0:>20s}: {1}'.format('Teams', team_count))
else:
print('{0:>20s}:'.format('Not logged in'))
class LoginCommand(Command):
def get_parser(self):
return login_parser
def is_authorised(self):
return False
def execute(self, params, **kwargs):
params.clear_session()
user = kwargs.get('email') or ''
password = kwargs.get('password') or ''
try:
if not user:
user = input('... {0:>16}: '.format('User(Email)')).strip()
if not user:
return
if not password:
password = getpass.getpass(prompt='... {0:>16}: '.format('Password'), stream=None).strip()
if not password:
return
except KeyboardInterrupt as e:
logging.info('Canceled')
return
params.user = user.lower()
params.password = password
logging.info('Logging in...')
api.login(params)
class CheckEnforcementsCommand(Command):
def get_parser(self):
return check_enforcements_parser
def is_authorised(self):
return False
def execute(self, params, **kwargs):
if params.enforcements:
if 'enterprise_invited' in params.enforcements:
print('You\'ve been invited to join {0}.'.format(params.enforcements['enterprise_invited']))
action = user_choice('A(ccept)/D(ecline)/I(gnore)?: ', 'adi')
action = action.lower()
if action == 'a':
action = 'accept'
elif action == 'd':
action = 'decline'
if action in ['accept', 'decline']:
e_rq = {
'command': '{0}_enterprise_invite'.format(action)
}
if action == 'accept':
verification_code = input('Please enter the verification code sent via email: ')
if verification_code:
e_rq['verification_code'] = verification_code
else:
e_rq = None
if e_rq:
try:
api.communicate(params, e_rq)
logging.info('%s enterprise invite', 'Accepted' if action == 'accept' else 'Declined')
#TODO reload enterprise settings
except Exception as e:
logging.error('Enterprise %s failure: %s', action, e)
if params.settings:
if 'share_account_to' in params.settings:
dt = datetime.datetime.fromtimestamp(params.settings['must_perform_account_share_by'] // 1000)
print('Your Keeper administrator has enabled the ability to transfer your vault records\n'
'in accordance with company operating procedures and policies.\n'
'Please acknowledge this change in account settings by typing ''Accept''.')
print('If you do not accept this change by {0}, you will be locked out of your account.'.format(dt.strftime('%a, %d %b %Y')))
try:
api.accept_account_transfer_consent(params, params.settings['share_account_to'])
finally:
del params.settings['must_perform_account_share_by']
del params.settings['share_account_to']
class LogoutCommand(Command):
def get_parser(self):
return logout_parser
def is_authorised(self):
return False
def execute(self, params, **kwargs):
params.clear_session()
connect_command_description = '''
Connect Command Syntax Description:
This command reads the custom fields for names starting with "connect:"
endpoint:<name> command
endpoint:<name>:description command description
Connection command may contain template parameters.
Parameter syntax is ${<parameter_name>}
Supported parameters:
${user_email} Keeper user email address
${login} Record login
${password} Record password
${text:<name>} non secured user variable. Stored to non-shared data
${mask:<name>} secured user variable. Stored to non-shared data
${file:<attachment_name>} stores attachment into temporary file. parameter is replaced with temp file name
${body:<attachment_name>} content of the attachment file.
SSH Example:
Title: SSH to my Server via Gateway
Custom Field 1 Name: connect:my_server:description
Custom Field 1 Value: Production Server Inside Gateway
Custom Field 2 Name: connect:my_server
Custom Field 2 Value: ssh -o "ProxyCommand ssh -i ${file:gateway.pem} <EMAIL> -W %h:%p" -i ${file:server.pem} <EMAIL>
File Attachments:
gateway.pem
server.pem
To initiate connection: "connect my_server"
'''
endpoint_pattern = re.compile(r'^connect:([^:]+)$')
endpoint_desc_pattern = re.compile(r'^connect:([^:]+):description$')
endpoint_parameter_pattern = re.compile(r'\${(.+?)}')
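# endpoint_pattern matches custom-field names like "connect:my_server" (capturing "my_server"),
# endpoint_desc_pattern matches the corresponding "connect:my_server:description" field, and
# endpoint_parameter_pattern finds "${...}" template parameters such as "${file:server.pem}".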
class ConnectEndpoint:
def __init__(self, name, description, record_uid, record_title, paths):
self.name = name
self.description = description
self.record_uid = record_uid
self.record_title = record_title
self.paths = paths
class ConnectCommand(Command):
LastRevision = 0 # int
Endpoints = [] # type: List[ConnectEndpoint]
def get_parser(self):
return connect_parser
def execute(self, params, **kwargs):
if kwargs.get('syntax_help'):
logging.info(connect_command_description)
return
ConnectCommand.find_endpoints(params)
endpoint = kwargs.get('endpoint')
if endpoint:
endpoints = [x for x in ConnectCommand.Endpoints if x.name == endpoint]
if not endpoints:
folder = None
rpos = endpoint.rfind('/')
if rpos > 0:
try_path = endpoint[:rpos+1]
rs = try_resolve_path(params, try_path)
if rs is not None:
if not rs[1]:
folder = rs[0]
endpoint = endpoint[rpos+1:]
endpoints = [x for x in ConnectCommand.Endpoints if x.name == endpoint]
if len(endpoints) > 0:
ConnectCommand.connect_endpoint(params, endpoint, api.get_record(params, endpoints[0].record_uid), kwargs.get('new_data') or False)
else:
logging.info("Connect endpoint '{0}' not found".format(endpoint))
else:
if ConnectCommand.Endpoints:
sorted_by = kwargs['sort_by'] or 'endpoint'
filter_by = kwargs['filter_by'] or ''
logging.info("Available connect endpoints")
if filter_by:
logging.info('Filtered by \"%s\"', filter_by)
filter_by = filter_by.lower()
logging.info('')
headers = ["#", 'Endpoint', 'Description', 'Record Title', 'Folder(s)']
table = []
for i in range(len(ConnectCommand.Endpoints)):
endpoint = ConnectCommand.Endpoints[i]
title = endpoint.record_title
folder = endpoint.paths[0] if len(endpoint.paths) > 0 else '/'
if filter_by:
if not any([x for x in [endpoint.name.lower(), title.lower(), folder.lower()] if x.find(filter_by) >= 0]):
continue
if len(title) > 23:
title = title[:20] + '...'
table.append([i + 1, endpoint.name, endpoint.description or '', title, folder])
table.sort(key=lambda x: x[4] if sorted_by == 'folder' else x[3] if sorted_by == 'title' else x[1])
print(tabulate(table, headers=headers))
print('')
else:
logging.info("No connect endpoints found")
return
@staticmethod
def find_endpoints(params):
# type: (KeeperParams) -> None
if ConnectCommand.LastRevision < params.revision:
ConnectCommand.LastRevision = params.revision
ConnectCommand.Endpoints.clear()
for record_uid in params.record_cache:
record = api.get_record(params, record_uid)
if record.custom_fields:
endpoints = []
endpoints_desc = {}
for field in record.custom_fields:
if 'name' in field:
m = endpoint_pattern.match(field['name'])
if m:
endpoints.append(m[1])
else:
yaml|grep -w ha|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not cancle:
if int(ret):
flag = True
break
else:
if not int(ret):
flag = True
break
assert flag
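# the check_* helpers below all follow the same pattern: poll the host over ssh (kubectl/virsh/ovs
# commands) inside a silent eventlet.Timeout; if the expected condition is never observed before the
# timeout, the with-block exits quietly, flag stays False and the final assert fails the test.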
def check_cloudHost_makeSnapshot_ok(response, vmid, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_makeVMimage_ok(response, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = 'find / -name {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_modify_cpu_num_ok(response, cpunum_new, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "virsh vcpucount %s|grep current|awk '{print $3}'|tail -1" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == cpunum_new:
flag = True
break
assert flag
def check_modify_mem_ok(response, memorysize, hostip):
#print(11111111111111111111111111111111111111111)
#print(response.json())
id = response.json().get("id")
#print("this is id....", id)
assert id
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "virsh dominfo %s|grep Use|awk '{print $3}'" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(int(ret)/(1024*1024)) == memorysize:
flag = True
break
assert flag
def check_query_cmrom_iso(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_addCdrom_ok(vmid, mirrorid, hostip):
username = "root"
password = "user@dev"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep {1}.iso|wc -l".format(vmid, mirrorid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if int(ret):
flag = True
break
assert flag
def check_changeBootSequence_ok(response, vmid, bootSeq, hostip):
assert response.json().get("id")
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep order|cut -d: -f 2".format(vmid, )
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = ret.decode("utf-8").replace("\n", "").replace(" ", "")
if bootSeq == 1:
if ret == "12":
flag = True
break
elif bootSeq == 2:
if ret == "21":
flag = True
break
assert flag
def check_changeSystem_querySystem_ok(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT!="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_changeOs_ok(response, template_url, rootvolumeid, hostip):
username = "root"
password = "<PASSWORD>"
ip = hostip
cmd = "diff %s `kubectl get vmd %s|tail -1|awk '{print $3}'`|wc -l" % (template_url, rootvolumeid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_mirror_all_ok(response):
print(response.json())
def check_delete_mirrorServer_ok(response, mirrorServerId):
print(response.json())
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmp|grep {0}|wc -l".format(mirrorServerId)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_all_resource_ok(response, flag="vm"):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
ids = response.json().get("id")
ids_list = ids.split(",")
for id in ids_list:
if flag == "vm":
cmd = "kubectl get vm|grep {0}|wc -l".format(id)
else:
cmd = "kubectl get vmp|grep {0}|wc -l".format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
#print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_net(response, l2vmn_num=2):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
try:
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = int(ret)
except Exception:
flag = True
break
if int(l2vmn_num) - ret == 2:
flag = True
break
assert flag
#l2vmn check
def check_creat_net_ok(response, l2vmn_num=0):
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) - int(l2vmn_num) == 2:
flag = True
break
assert flag
def check_creat_l3_net_ok(response):
id = response.json().get("id")
assert id
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep {0}|wc -l".format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_creat_vxlanPool_ok(response, pool_name):
#print(response)
try:
code = response.json().get("code")
if "1" in pool_name:
assert -1 == code
else:
assert 1 == code
except Exception as e:
print(e)
assert True
def check_add_vxlan_vni_ok(response, flag):
print(response.json())
if 3 == flag:
assert response.json().get("code") == 1
if 2 == flag:
assert response.json().get("code") == -1
if 1 == flag:
assert response.json().get("code") == -1
def check_delete_vni_range_ok(response, vni_list, vnistart, endvni):
for vni in vni_list.split(","):
if int(vni) in range(int(vnistart), int(endvni) + 1):  # vni_list entries are strings; cast before the range check
assert -1 == response.json().get("code")
assert 1 == response.json().get("code")
def check_delete_vxlan_net_ok(response, vni, vxlan_clusterid_list):
assert response.json().get("code") == 1
#print(vxlan_clusterid_list)
#print(7777777777777777777777777)
try:
vxlan_clusterid_list = json.loads(vxlan_clusterid_list)
except Exception:
vxlan_clusterid_list = tuple(vxlan_clusterid_list.split(","))
#print(vxlan_clusterid_list)
#print(66666666666666)
if len(vxlan_clusterid_list) > 1:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID IN {0};'.format(str(vxlan_clusterid_list))
else:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID="{0}";'.format(vxlan_clusterid_list[0])
#print(sql_cmd)
#print(555555555555555555555)
result = Query()(sql_cmd)
ip_list = []
for re in result:
ip_list.append(re.get("HOSTIP"))
username = "root"
password = "<PASSWORD>"
for ip in ip_list:
cmd = "ovs-vsctl list-br|grep vx{0}|wc -l".format(vni)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.1)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not int(ret):
flag = True
break
assert flag
def check_modify_l3network_mtu(response, mtu):
id = response.json().get("id")
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep mtu|cut -d\"=\" -f2" % (id,)
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == int(mtu):
flag = True
break
assert flag
def check_l3network_add_dns(response, mtu, rows, nid, dns_addr):
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep -E 'mtu|dns'|sed ':a;N;s/\n/\t/;ba;'" % (nid,)
dns_list = [row.get("dns") for row in rows]
re_mtu = 0
re_dns_list = []
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.2)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
tp_str = ret.split()
for i in tp_str:
if "mtu" in i:
re_mtu = int(i.split("=")[1])
elif "dns" in i:
if "," in i:
re_dns_list = i[12:-1].split(",")
else:
re_dns_list.append(i.split("=")[1])
assert int(mtu) == re_mtu
assert dns_addr in re_dns_list
flag_2 = True
for dns in dns_list:
if dns not in re_dns_list:
flag_2 = False
break
if flag_2:
flag = True
break
assert flag
def check_vpc_network_add_ok(response):
id = response.json().get("id")
assert id
cmd = "kubectl get vmn|grep {0}|wc -l".format(id,)
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(40, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_vpc_router_stop_or_start(response):
id = response.json().get("id")
cmd = "kubectl get vm|grep {0}|grep -i shut|wc -l".format(id, )
username = "root"
password = "<PASSWORD>"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(100, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_setConsolePasswd_ok(response, hostip, passwd=None):
id = response.json().get("id")
if passwd:
cmd = 'cat /tmp/%s.xml |grep passwd|awk -F"passwd=" \'{print $2}\'|cut -d"\"" -f2' % (id,)
else:
cmd = 'cat /tmp/%s.xml |grep passwd|wc -l' % (id,)  # '-l' and the format args are assumed; the original line is truncated
<filename>Chatops/Chatops/run.py
import math
from mattermostdriver import Driver, Websocket
import json
from Chatops import config, dialogflowfile, instances
import asyncio
import threading
import boto3
import boto3.session
import requests
from datetime import datetime
from botocore.exceptions import ClientError
from threading import Lock
import sqlalchemy as db
"""Send codeship notification to the user"""
def send_notification(channel_id, props):
d.posts.create_post(options={
'channel_id': channel_id,
'message': "",
"props": props})
"""If manager reject the request send message to the requested user"""
def reject_request(channel_id, reject_person, instance_name, type):
if type == 'scale_instance':
d.posts.create_post(options={
'channel_id': channel_id,
'message': f"Your request to scale **{instance_name}** was rejected by @{reject_person}. Please note that no "
f"actions has been taken for that instance. "
})
elif type == 'start_instance':
d.posts.create_post(options={
'channel_id': channel_id,
'message': f"Your request to start **{instance_name}** was rejected by @{reject_person}. Please note that no "
f"actions has been taken for that instance. "
})
elif type == 'stop_instance':
d.posts.create_post(options={
'channel_id': channel_id,
'message': f"Your request to stop **{instance_name}** was rejected by @{reject_person}. Please note that no "
f"actions has been taken for that instance. "
})
else:
d.posts.create_post(options={
'channel_id': channel_id,
'message': f"Your request to reboot **{instance_name}** was rejected by @{reject_person}. Please note that no "
f"actions has been taken for that instance. "
})
"""Check Permission the user is able to scale the instance or not"""
def check_permission(user_id, instance_name, channel_id, message, instance_type, type, engine, connection):
metadata = db.MetaData()
table_instances = db.Table('user_instance', metadata, autoload=True, autoload_with=engine)
query_f = db.select([table_instances.columns.id, table_instances.columns.name]).where(table_instances.columns.name == instance_name)
resultproxy = connection.execute(query_f)
instances = resultproxy.fetchall()
try:
instance_id = instances[0][0]
except IndexError:
d.posts.create_post(options={
'channel_id': channel_id,
'message': "Instance name is not found."})
return
table_manager = db.Table('user_manager', metadata, autoload=True, autoload_with=engine)
query_f = db.select([table_manager.columns.manager_id_id]).where(table_manager.columns.instance_id_id == instance_id)
resultproxy = connection.execute(query_f)
managers = resultproxy.fetchall()
managers_id = []
for manager in managers:
managers_id.append(manager[0])
table_botuser = db.Table('user_botuser', metadata, autoload=True, autoload_with=engine)
query_f = db.select([table_botuser.columns.id, table_botuser.columns.name]).where(table_botuser.columns.user_id == user_id)
resultproxy = connection.execute(query_f)
users = resultproxy.fetchall()
user = users[0][0]
user_name = users[0][1]
if managers and user not in managers_id:
table_instanceoperation = db.Table('user_instanceoperation', metadata, autoload=True, autoload_with=engine)
query_i = table_instanceoperation.insert().values(requested_user_id=user, message=message,
channel_id=channel_id, status="Pending",
created_date=datetime.utcnow())
row = connection.execute(query_i)
request_id = row.lastrowid
manager_set = set()
for item in managers_id:
manager_set.add(str(item))
query_f = db.select([table_botuser.columns.id, table_botuser.columns.channel_id], table_botuser.columns.id.in_(manager_set))
resultproxy = connection.execute(query_f)
send_request = resultproxy.fetchall()
if type == 'scale_instance':
for item in send_request:
"""action button data"""
accept_button = json.dumps({'status': 'Accept', 'user': user, 'type': 'scale_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
reject_button = json.dumps({'status': 'Reject', 'user': user, 'type': 'scale_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
d.posts.create_post(options={
"channel_id": item[1],
"message": f"@{user_name} is request to scale instance **{instance_name}** with instance type **{instance_type}**",
"props": {"attachments": [
{
"text": "Please Accept or Reject the Request",
"color": "#3AA3E3",
"attachment_type": "default",
"actions": [
{
"name": "Accept",
"type": "button",
"value": "Accept",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(accept_button)
}
}
},
{
"name": "Reject",
"type": "button",
"value": "Reject",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(reject_button)
}
}
}
]
}
]
}})
elif type == 'start_instance':
for item in send_request:
"""action button data"""
accept_button = json.dumps({'status': 'Accept', 'user': user, 'type': 'start_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
reject_button = json.dumps({'status': 'Reject', 'user': user, 'type': 'start_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
d.posts.create_post(options={
"channel_id": item[1],
"message": f"@{user_name} is request to start instance **{instance_name}**",
"props": {"attachments": [
{
"text": "Please Accept or Reject the Request",
"color": "#3AA3E3",
"attachment_type": "default",
"actions": [
{
"name": "Accept",
"type": "button",
"value": "Accept",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(accept_button)
}
}
},
{
"name": "Reject",
"type": "button",
"value": "Reject",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(reject_button)
}
}
}
]
}
]
}})
elif type == 'stop_instance':
for item in send_request:
"""action button data"""
accept_button = json.dumps({'status': 'Accept', 'user': user, 'type': 'stop_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
reject_button = json.dumps({'status': 'Reject', 'user': user, 'type': 'stop_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
d.posts.create_post(options={
"channel_id": item[1],
"message": f"@{user_name} is request to stop instance **{instance_name}**",
"props": {"attachments": [
{
"text": "Please Accept or Reject the Request",
"color": "#3AA3E3",
"attachment_type": "default",
"actions": [
{
"name": "Accept",
"type": "button",
"value": "Accept",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(accept_button)
}
}
},
{
"name": "Reject",
"type": "button",
"value": "Reject",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(reject_button)
}
}
}
]
}
]
}})
else:
for item in send_request:
"""action button data"""
accept_button = json.dumps({'status': 'Accept', 'user': user, 'type': 'reboot_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
reject_button = json.dumps({'status': 'Reject', 'user': user, 'type': 'reboot_instance',
'request_id': request_id, 'manager_id': item[0], 'message': message})
d.posts.create_post(options={
"channel_id": item[1],
"message": f"@{user_name} is request to reboot instance **{instance_name}**",
"props": {"attachments": [
{
"text": "Please Accept or Reject the Request",
"color": "#3AA3E3",
"attachment_type": "default",
"actions": [
{
"name": "Accept",
"type": "button",
"value": "Accept",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(accept_button)
}
}
},
{
"name": "Reject",
"type": "button",
"value": "Reject",
"integration": {
"url": f"{config.BASE_URL}/permission",
"context": {
"action": str(reject_button)
}
}
}
]
}
]
}})
return False
else:
table_instanceoperation = db.Table('user_instanceoperation', metadata, autoload=True, autoload_with=engine)
query_i = table_instanceoperation.insert().values(requested_user_id=user, message=message,
channel_id=channel_id, status="Accepted",
created_date=datetime.utcnow(), response_by_id=user,
response_date=datetime.utcnow())
connection.execute(query_i)
return True
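# summary: if the instance has managers and the requester is not one of them, a pending
# InstanceOperation row is stored and each manager receives an interactive Accept/Reject post, and
# the function returns False; otherwise the operation is recorded as accepted and True is returned.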
"""Check project config in database or not"""
def check_project(project_name, engine, connection):
"""Get all project from database"""
metadata = db.MetaData()
table_project = db.Table('user_project', metadata, autoload=True, autoload_with=engine)
query_f = db.select([table_project.columns.codeship_project_name, table_project.columns.gitlab_project_id])
resultproxy = connection.execute(query_f)
projects = resultproxy.fetchall()
for project in projects:
if project[0] == project_name:
return project[1]
return None
"""Notify the instance access user"""
def notify_stack_holders(instance_name, user_id, requested_user, status, type, engine, connection):
metadata = db.MetaData()
table_botuser = db.Table('user_botuser', metadata, autoload=True, autoload_with=engine)
table_instanceaccess = db.Table('user_instanceaccess', metadata, autoload=True, autoload_with=engine)
table_instance = db.Table('user_instance', metadata, autoload=True, autoload_with=engine)
join_query = table_instanceaccess.join(table_botuser, table_instanceaccess.columns.user_id_id == table_botuser.columns.id).join(table_instance, table_instanceaccess.columns.instance_id_id == table_instance.columns.id)
query = db.select([table_botuser.columns.channel_id, table_botuser.columns.name, table_botuser.columns.user_id]).select_from(join_query).where(table_instance.columns.name == instance_name)
resultproxy = connection.execute(query)
users = resultproxy.fetchall()
query_f = db.select([table_botuser.columns.name]).where(table_botuser.columns.user_id == user_id)
resultproxy = connection.execute(query_f)
approve_user = resultproxy.fetchall()
approve_user = approve_user[0][0]
if status == 'start':
for user in users:
if user[2] != user_id:
if type == 'scale_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** is getting scaled on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to scale instance **{instance_name}** has been approved by @{approve_user}. Instance is being scaled at the moment, I will update you once it completes. "})
if type == 'start_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** is getting started on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to start instance **{instance_name}** has been approved by @{approve_user}. Instance is being started at the moment, I will update you once it completes. "})
if type == 'stop_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** is getting stopped on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to stop instance **{instance_name}** has been approved by @{approve_user}. Instance is being stopped at the moment, I will update you once it completes. "})
if status == 'end':
for user in users:
if user[2] != user_id:
if type == 'scale_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** has been scaled on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to scale instance **{instance_name}** has been successful. The instance should reflect your requested configuration. "})
if type == 'start_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** has been started on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to start instance **{instance_name}** has been successful. The instance should reflect your requested configuration. "})
if type == 'stop_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** has been stopped on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to stop instance **{instance_name}** has been successful. The instance should reflect your requested configuration. "})
if type == 'reboot_instance':
if user[1] != requested_user:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Instance **{instance_name}** has been reboot on request of @{requested_user} "
f"approved by @{approve_user}"})
else:
d.posts.create_post(options={
'channel_id': user[0],
'message': f"Your request to reboot instance **{instance_name}** has been successful. The instance should reflect your requested configuration. "})
"""Get all instance from aws"""
def get_instance(engine, connection):
session = boto3.session.Session()
<reponame>harvardnlp/readcomp
import argparse
from collections import defaultdict
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import datastuff
class Reader(nn.Module):
"""
Attention-Sum style reader
"""
def __init__(self, word_embs, words_new2old, opt):
super(Reader, self).__init__()
self.wlut = nn.Embedding(opt.wordtypes, opt.emb_size)
if opt.std_feats:
self.flut = nn.Embedding(opt.ftypes, opt.emb_size if opt.add_inp else opt.feat_size)
if opt.speaker_feats:
self.splut = nn.Embedding(opt.sptypes, opt.emb_size if opt.add_inp else opt.sp_size)
self.emb_size, self.rnn_size, self.add_inp = opt.emb_size, opt.rnn_size, opt.add_inp
self.std_feats, self.speaker_feats = opt.std_feats, opt.speaker_feats
insize = opt.emb_size
if opt.std_feats and not opt.add_inp:
insize += 3*opt.feat_size + opt.extra_size
if opt.speaker_feats and not opt.add_inp:
insize += 2*opt.sp_size
self.doc_rnn = nn.GRU(insize, 2*opt.rnn_size, opt.layers, bidirectional=True)
self.drop = nn.Dropout(opt.dropout)
if opt.add_inp:
self.extr_lin = nn.Linear(opt.extra_size, opt.emb_size)
else:
self.extr_mul = nn.Parameter(
torch.Tensor(1, 1, opt.extra_size).uniform_(-opt.initrange, opt.initrange))
self.inp_activ = nn.ReLU() if opt.relu else nn.Tanh()
self.softmax = nn.Softmax(dim=1)
self.initrange = opt.initrange
self.mt_loss, self.mt_step_mode = opt.mt_loss, opt.mt_step_mode
if self.mt_loss == "idx-loss":
mt_in = 2*opt.rnn_size if self.mt_step_mode == "before" else 4*opt.rnn_size
self.doc_mt_lin = nn.Linear(mt_in, opt.max_entities+1) # 0 is an ignore idx
elif self.mt_loss == "ant-loss":
self.transform_for_ants = opt.transform_for_ants
if opt.transform_for_ants:
trans_size = 2*opt.rnn_size if self.mt_step_mode == "before" else 4*opt.rnn_size
self.ant_lin = nn.Linear(2*opt.rnn_size, trans_size, bias=False)
self.topdrop, self.mt_drop = opt.topdrop, opt.mt_drop
self.use_choices, self.use_test_choices = opt.use_choices, opt.use_test_choices
self.init_weights(word_embs, words_new2old)
def init_weights(self, word_embs, words_new2old):
"""
(re)init weights
"""
initrange = self.initrange
luts = [self.wlut]
if self.std_feats:
luts.append(self.flut)
if self.speaker_feats:
luts.append(self.splut)
for lut in luts:
lut.weight.data.uniform_(-initrange, initrange)
rnns = [self.doc_rnn]
for rnn in rnns:
for thing in rnn.parameters():
thing.data.uniform_(-initrange, initrange)
lins = []
if self.add_inp:
lins.append(self.extr_lin)
if self.mt_loss == "idx-loss":
lins.append(self.doc_mt_lin)
if self.mt_loss == "ant-loss" and self.transform_for_ants:
lins.append(self.ant_lin)
for lin in lins:
lin.weight.data.uniform_(-initrange, initrange)
if lin.bias is not None:
lin.bias.data.zero_()
# do the word embeddings
for i in xrange(len(words_new2old)):
old_idx = words_new2old[i]
if old_idx < word_embs.size(0):
self.wlut.weight.data[i][:word_embs.size(1)].copy_(word_embs[old_idx])
def forward(self, batch, val=False):
"""
returns bsz x seqlen scores
"""
seqlen, bsz = batch["words"].size()
wembs = self.wlut(batch["words"]) # seqlen x bsz -> seqlen x bsz x emb_size
if self.std_feats:
# seqlen x bsz x 3 -> seqlen x bsz*3 x emb_size -> seqlen x bsz x 3 x emb_size
fembs = self.flut(batch["feats"].view(seqlen, -1)).view(
seqlen, bsz, -1, self.flut.embedding_dim)
if self.speaker_feats:
# seqlen x bsz x 2 -> seqlen x bsz*2 x emb_size -> seqlen x bsz x 2 x emb_size
sembs = self.splut(batch["spee_feats"].view(seqlen, -1)).view(
seqlen, bsz, -1, self.splut.embedding_dim)
inp = wembs
if self.add_inp: # mlp the input
if self.std_feats:
ex_size = batch["extr"].size(2)
inp = (inp + fembs.sum(2)
+ self.extr_lin(batch["extr"].view(-1, ex_size)).view(seqlen, bsz, -1))
if self.speaker_feats:
inp = inp + sembs.sum(2)
if self.std_feats or self.speaker_feats:
inp = self.inp_activ(inp)
else: # concatenate everything
things_to_cat = [inp]
if self.std_feats:
things_to_cat.append(fembs.view(seqlen, bsz, -1))
things_to_cat.append(batch["extr"] * self.extr_mul.expand_as(batch["extr"]))
if self.speaker_feats:
things_to_cat.append(sembs.view(seqlen, bsz, -1))
if len(things_to_cat) > 1:
inp = torch.cat(things_to_cat, 2) # seqlen x bsz x sum (all the stuff)
if self.drop.p > 0:
inp = self.drop(inp)
# view each state as [fwd_q, fwd_d, bwd_d, bwd_q]
states, _ = self.doc_rnn(inp) # seqlen x bsz x 2*2*rnn_size
doc_states = states[:, :, self.rnn_size:3*self.rnn_size]
if args.use_qidx:
# get states before and after the question idx
b4states = states.view(-1, states.size(2))[batch["qpos"]-bsz][:, :self.rnn_size]
afterstates = states.view(-1, states.size(2))[batch["qpos"]+bsz][:, -self.rnn_size:]
query_rep = torch.cat([b4states, afterstates], 1) # bsz x 2*rnn_size
else:
query_rep = torch.cat([states[seqlen-1, :, :self.rnn_size],
states[0, :, -self.rnn_size:]], 1) # bsz x 2*rnn_size
if self.topdrop and self.drop.p > 0:
doc_states = self.drop(doc_states)
# bsz x seqlen x 2*rnn_size * bsz x 2*rnn_size x 1 -> bsz x seqlen x 1 -> bsz x seqlen
scores = torch.bmm(doc_states.transpose(0, 1), query_rep.unsqueeze(2)).squeeze(2)
if self.use_choices or (val and self.use_test_choices):
scores = batch["choicemask"] * scores
doc_mt_scores = None
if self.mt_loss == "idx-loss":
doc_mt_scores = self.get_step_scores(states)
elif self.mt_loss == "ant-loss":
doc_mt_scores = self.get_ant_scores(states)
return self.softmax(scores), doc_mt_scores
def get_states_for_step(self, states):
"""
gets the states we want for doing multiclass pred @ time t
args:
states - seqlen x bsz x 2*2*rnn_size; view each state as [fwd_q, fwd_d, bwd_d, bwd_q]
returns:
seqlen*bsz x something
"""
seqlen, bsz, drnn_sz = states.size()
if not hasattr(self, "dummy"):
self.dummy = states.data.new(1, drnn_sz/2).zero_()
dummy = self.dummy
if self.mt_step_mode == "exact":
nustates = states.view(-1, drnn_sz) # seqlen*bsz x 2*2*rnn_size
elif self.mt_step_mode == "before-after":
dummyvar = Variable(dummy.expand(bsz, drnn_sz/2))
# prepend zeros to front, giving seqlen*bsz x 2*rnn_size
fwds = torch.cat([dummyvar, states.view(-1, drnn_sz)[:-bsz, :drnn_sz/2]], 0)
# append zeros to back, giving seqlen*bsz x 2*rnn_size
bwds = torch.cat([states.view(-1, drnn_sz)[bsz:, drnn_sz/2:], dummyvar], 0)
nustates = torch.cat([fwds, bwds], 1) # seqlen*bsz x 2*2*rnn_size
elif self.mt_step_mode == "before": # just before
dummyvar = Variable(dummy.expand(bsz, drnn_sz/2))
# prepend zeros to front, giving seqlen*bsz x 2*rnn_size
nustates = torch.cat([dummyvar, states.view(-1, drnn_sz)[:-bsz, :drnn_sz/2]], 0)
else:
assert False, "%s not a thing" % self.mt_step_mode
return nustates
def get_step_scores(self, states):
"""
states - seqlen x bsz x 2*2*rnn_size
returns:
seqlen*bsz x nclasses
"""
states_for_step = self.get_states_for_step(states)
if self.mt_drop and self.drop.p > 0:
states_for_step = self.drop(states_for_step)
doc_mt_preds = self.doc_mt_lin(states_for_step) # seqlen*bsz x nclasses
return doc_mt_preds
def get_ant_scores(self, states):
"""
states - seqlen x bsz x 2*2*rnn_size
return:
bsz x seqlen x seqlen
"""
seqlen, bsz, drnn_sz = states.size()
states_for_step = self.get_states_for_step(states).view(seqlen, bsz, -1)
# may need to transform first....
# bsz x seqlen x sz * bsz x sz x seqlen -> bsz x seqlen x seqlen
if self.transform_for_ants:
ant_states = states.view(-1, states.size(2))[:, :drnn_sz/2] # seqlen*bsz x 2*rnn_size
ant_states = self.ant_lin(ant_states).view(seqlen, bsz, -1)
else:
ant_states = states[:, :, :drnn_sz/2] # seqlen x bsz x 2*rnn_size
scores = torch.bmm(states_for_step.transpose(0, 1),
ant_states.transpose(0, 1).transpose(1, 2))
return scores
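# Editor's note (usage sketch, not part of the original script; how word_embs and
# words_new2old are built by the data pipeline is assumed, not shown here):
#   net = Reader(word_embs, words_new2old, opt)
#   scores, mt_scores = net(batch)         # scores: bsz x seqlen, each row sums to 1
#   loss = attn_sum_loss(batch, scores)    # defined below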
def get_ncorrect(batch, scores):
"""
i'm just gonna brute force this
scores - bsz x seqlen
answers - bsz
"""
bsz, seqlen = scores.size()
words, answers = batch["words"].data, batch["answers"].data
ncorrect = 0
for b in xrange(bsz):
word2prob = defaultdict(float)
best, best_prob = -1, -float("inf")
for i in xrange(seqlen):
word2prob[words[i][b]] += scores.data[b][i]
if word2prob[words[i][b]] > best_prob:
best = words[i][b]
best_prob = word2prob[words[i][b]]
ncorrect += (best == answers[b])
return ncorrect
def attn_sum_loss(batch, scores):
"""
scores - bsz x seqlen
answers - bsz
"""
bsz, seqlen = scores.size()
mask = batch["answers"].data.unsqueeze(1).expand(bsz, seqlen).eq(batch["words"].data.t())
marg_log_prob_sum = (scores * Variable(mask.float())).sum(1).log().sum()
return -marg_log_prob_sum
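# Editor's note (illustrative sketch, not part of the original script): attn_sum_loss is
# the attention-sum reader objective: the attention probabilities of every position whose
# word equals the answer are summed, and the negative log of that marginal is accumulated
# over the batch. Hypothetical worked example for one batch element with seqlen = 4:
#   words  = [7, 3, 7, 9], answer = 7
#   scores = [0.1, 0.2, 0.3, 0.4]     (already softmax-normalized by Reader.forward)
#   mask   = [1, 0, 1, 0]             (positions whose word equals the answer)
#   loss_b = -log(0.1 + 0.3)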
xent = nn.CrossEntropyLoss(ignore_index=0, size_average=False)
def multitask_loss1(batch, doc_mt_scores):
"""
doc_mt_scores - seqlen*bsz x nclasses
"""
mt1_targs = batch["mt1_targs"] # seqlen x bsz, w/ 0 where we want to ignore
loss = xent(doc_mt_scores, mt1_targs.view(-1))
return loss
def multitask_loss2(batch, doc_mt_scores, sm):
"""
doc_mt_scores - bsz x seqlen x seqlen
N.B. rn this only considers entities that are repeated; should
really have it predict a dummy value for things not repeated
"""
bsz, seqlen, _ = doc_mt_scores.size()
loss = 0
reps = batch["mt2_targs"] # bsz x seqlen; indicators for repeated entities
for b in xrange(bsz):
# get lower triangle (excluding diagonal!) then softmax
pws = sm(torch.tril(doc_mt_scores[b], diagonal=-1)) # seqlen x seqlen
words_b = batch["words"].data[:, b].unsqueeze(1).expand(seqlen, seqlen).t()
#mask = ents[b].data.unsqueeze(1).expand(seqlen, seqlen).eq(words_b)
mask = words_b.t().eq(words_b) # seqlen x seqlen
marg_log_probs = (pws * Variable(torch.tril( # probably not necessary to tril...
mask.float(), diagonal=-1))).sum(1).add_(1e-6).log()
# we need to ignore rows not corresponding to entities
loss = loss - marg_log_probs.dot(reps[b])
return loss
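# Editor's note (illustrative sketch, not part of the original script): multitask_loss2 is
# an antecedent objective. Each row of the lower-triangularized score matrix is softmaxed
# into a distribution over earlier positions, and the loss rewards mass placed on earlier
# positions holding the same word, but only for rows flagged in batch["mt2_targs"].
# Hypothetical mini example (seqlen = 3, one entity repeated at positions 0 and 2):
#   words = [5, 8, 5], reps = [0, 0, 1]
#   only row 2 contributes, so loss_b is roughly -log(p(2 -> 0)), since word_0 == word_2.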
parser = argparse.ArgumentParser(description='')
parser.add_argument('-datafile', type=str, default='', help='')
parser.add_argument('-bsz', type=int, default=64, help='')
parser.add_argument('-maxseqlen', type=int, default=1024, help='')
parser.add_argument('-save', type=str, default='', help='path to save the final model')
parser.add_argument('-load', type=str, default='', help='path to saved model')
parser.add_argument('-std_feats', action='store_true', help='')
parser.add_argument('-speaker_feats', action='store_true', help='')
parser.add_argument('-use_choices', action='store_true', help='')
parser.add_argument('-use_test_choices', action='store_true', help='')
parser.add_argument('-use_qidx', action='store_true', help='')
parser.add_argument('-query_idx', type=int, default=56298, help='query idx in ORIGINAL data')
parser.add_argument('-mt_loss', type=str, default='',
choices=["", "idx-loss", "ant-loss"], help='')
parser.add_argument('-mt_step_mode', type=str, default='before',
choices=["exact", "before-after", "before"],
help='which rnn states to use when doing mt stuff')
parser.add_argument('-max_entities', type=int, default=2,
help='number of distinct entities to predict')
parser.add_argument('-max_mentions', type=int, default=2,
help='number of entity tokens to predict')
parser.add_argument('-transform_for_ants', action='store_true', help='')
parser.add_argument('-mt_coeff', type=float, default=1, help='scales mt loss')
parser.add_argument('-emb_size', type=int, default=128, help='size of word embeddings')
parser.add_argument('-rnn_size', type=int, default=128, help='size of rnn hidden state')
parser.add_argument('-feat_size', type=int, default=128, help='')
parser.add_argument('-sp_size', type=int, default=80, help='')
parser.add_argument('-layers', type=int, default=1, help='num rnn layers')
parser.add_argument('-add_inp', action='store_true', help='mlp features (instead of concat)')
parser.add_argument('-dropout', type=float, default=0, help='dropout')
parser.add_argument('-topdrop', action='store_true', help='dropout on last rnn layer')
parser.add_argument('-mt_drop', action='store_true', help='dropout before mt decoder')
parser.add_argument('-relu', action='store_true', help='relu for input mlp')
parser.add_argument('-optim', type=str, default='adam', help='')
parser.add_argument('-lr', type=float, default=0.001, help='learning rate')
parser.add_argument('-beta1', type=float, default=0.9, help='')
parser.add_argument('-epochs', type=int, default=4, help='')
parser.add_argument('-clip', type=float, default=5, help='gradient clipping')
parser.add_argument('-initrange', type=float, default=0.1, help='uniform init interval')
parser.add_argument('-seed', type=int, default=3435, help='')
parser.add_argument('-log_interval', type=int, default=200, help='')
parser.add_argument('-test', action='store_true', help='')
parser.add_argument('-just_eval', action='store_true', help='')
parser.add_argument('-cuda', action='store_true', help='')
args = parser.parse_args()
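# Editor's note: a hypothetical invocation (the script name and data file path are
# assumptions, not taken from the original repo; flags match the parser above):
#   python reader.py -datafile /path/to/data.hdf5 -bsz 64 -std_feats -speaker_feats \
#       -mt_loss idx-loss -cuda -save reader.pt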
if __name__ == "__main__":
print args
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
| |
<reponame>entelecheia/electra-tf2<filename>electra/pretrain/pretrain_utils.py
# coding=utf-8
# Copyright 2020 The Google Research Authors.
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for preparing pre-training data and supplying them to the model."""
import collections
import os
import numpy as np
import six
import tensorflow as tf
from ..util import utils
from ..model import tokenization
def get_dataset(config, batch_size, num_cpu_threads=4, world_size=1, rank=0):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64),
}
input_files = []
for input_pattern in config.pretrain_tfrecords.split(","):
input_files.extend(tf.io.gfile.glob(input_pattern))
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.shard(num_shards=world_size, index=rank)
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files), seed=config.seed, reshuffle_each_iteration=False)
cycle_length = min(num_cpu_threads, len(input_files))
d = d.interleave(
tf.data.TFRecordDataset,
cycle_length=cycle_length,
deterministic=True)
d = d.shuffle(buffer_size=100, seed=config.seed, reshuffle_each_iteration=False)
d = d.map(lambda record: _decode_record(record, name_to_features))
d = d.batch(batch_size)
return d
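# Editor's note (usage sketch, not part of the original module; the exact config
# attribute for the batch size is an assumption):
#   dataset = get_dataset(config, batch_size=config.train_batch_size)
#   for features in dataset.take(1):
#       inputs = features_to_inputs(features)   # helper defined below
#       print(inputs.input_ids.shape)           # (batch_size, config.max_seq_length)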
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.cast(t, tf.int32)
example[name] = t
return example
# model inputs - it's a bit nicer to use a namedtuple rather than keep the
# features as a dict
Inputs = collections.namedtuple(
"Inputs", ["input_ids", "input_mask", "segment_ids", "masked_lm_positions",
"masked_lm_ids", "masked_lm_weights"])
def features_to_inputs(features):
return Inputs(
input_ids=features["input_ids"],
input_mask=features["input_mask"],
segment_ids=features["segment_ids"],
masked_lm_positions=(features["masked_lm_positions"]
if "masked_lm_positions" in features else None),
masked_lm_ids=(features["masked_lm_ids"]
if "masked_lm_ids" in features else None),
masked_lm_weights=(features["masked_lm_weights"]
if "masked_lm_weights" in features else None),
)
def get_updated_inputs(inputs, **kwargs):
features = inputs._asdict()
for k, v in kwargs.items():
features[k] = v
return features_to_inputs(features)
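# Editor's note (illustrative sketch, not part of the original module): Inputs is an
# immutable namedtuple, so get_updated_inputs rebuilds it with selected fields replaced:
#   masked = get_updated_inputs(inputs, input_ids=new_ids, masked_lm_positions=positions)
# leaves every other field of `inputs` untouched.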
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
      specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if isinstance(tensor, np.ndarray) or isinstance(tensor, list):
shape = np.array(tensor).shape
if isinstance(expected_rank, six.integer_types):
assert len(shape) == expected_rank
elif expected_rank is not None:
assert len(shape) in expected_rank
return shape
#
# if name is None:
# name = tensor.name
#
# if expected_rank is not None:
# assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
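# Editor's note (illustrative sketch, not part of the original module): for a fully
# static tensor such as tf.zeros([4, 16, 128]), get_shape_list returns the plain Python
# list [4, 16, 128]; inside a tf.function traced with tf.TensorSpec([None, None, 128]),
# the unknown batch and sequence dimensions come back as scalar tf.shape() tensors while
# the static 128 stays a Python int.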
def gather_positions(sequence, positions):
"""Gathers the vectors at the specific positions over a minibatch.
Args:
sequence: A [batch_size, seq_length] or
[batch_size, seq_length, depth] tensor of values
positions: A [batch_size, n_positions] tensor of indices
Returns: A [batch_size, n_positions] or
[batch_size, n_positions, depth] tensor of the values at the indices
"""
shape = get_shape_list(sequence, expected_rank=[2, 3])
depth_dimension = (len(shape) == 3)
if depth_dimension:
B, L, D = shape
else:
B, L = shape
D = 1
sequence = tf.expand_dims(sequence, -1)
position_shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(positions + position_shift, [-1])
flat_sequence = tf.reshape(sequence, [B * L, D])
gathered = tf.gather(flat_sequence, flat_positions)
if depth_dimension:
return tf.reshape(gathered, [B, -1, D])
else:
return tf.reshape(gathered, [B, -1])
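# Editor's note (illustrative sketch, not part of the original module):
#   seq = tf.constant([[10, 11, 12, 13]])   # [batch=1, seq_len=4]
#   pos = tf.constant([[0, 2]])             # [batch=1, n_positions=2]
#   gather_positions(seq, pos)              # -> [[10, 12]]
# With a depth dimension ([B, L, D]) the same positions are gathered per batch element
# and the depth axis is preserved.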
def scatter_update(sequence, updates, positions):
"""Scatter-update a sequence.
Args:
sequence: A [batch_size, seq_len] or [batch_size, seq_len, depth] tensor
    updates: A tensor of size batch_size*n_positions(*depth)
positions: A [batch_size, n_positions] tensor
Returns: A tuple of two tensors. First is a [batch_size, seq_len] or
[batch_size, seq_len, depth] tensor of "sequence" with elements at
"positions" replaced by the values at "updates." Updates to index 0 are
ignored. If there are duplicated positions the update is only applied once.
Second is a [batch_size, seq_len] mask tensor of which inputs were updated.
"""
shape = get_shape_list(sequence, expected_rank=[2, 3])
depth_dimension = (len(shape) == 3)
if depth_dimension:
B, L, D = shape
else:
B, L = shape
D = 1
sequence = tf.expand_dims(sequence, -1)
N = get_shape_list(positions)[1]
shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(positions + shift, [-1, 1])
flat_updates = tf.reshape(updates, [-1, D])
updates = tf.scatter_nd(flat_positions, flat_updates, [B * L, D])
updates = tf.reshape(updates, [B, L, D])
flat_updates_mask = tf.ones([B * N], tf.int32)
updates_mask = tf.scatter_nd(flat_positions, flat_updates_mask, [B * L])
updates_mask = tf.reshape(updates_mask, [B, L])
not_first_token = tf.concat([tf.zeros((B, 1), tf.int32),
tf.ones((B, L - 1), tf.int32)], -1)
updates_mask *= not_first_token
updates_mask_3d = tf.expand_dims(updates_mask, -1)
# account for duplicate positions
if sequence.dtype == tf.float32:
updates_mask_3d = tf.cast(updates_mask_3d, tf.float32)
updates /= tf.maximum(1.0, updates_mask_3d)
else:
assert sequence.dtype == tf.int32
updates = tf.math.floordiv(updates, tf.maximum(1, updates_mask_3d))
updates_mask = tf.minimum(updates_mask, 1)
updates_mask_3d = tf.minimum(updates_mask_3d, 1)
updated_sequence = (((1 - updates_mask_3d) * sequence) +
(updates_mask_3d * updates))
if not depth_dimension:
updated_sequence = tf.squeeze(updated_sequence, -1)
return updated_sequence, updates_mask
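# Editor's note (illustrative sketch, not part of the original module):
#   seq = tf.constant([[5, 6, 7, 8]])       # [batch=1, seq_len=4], int32
#   upd = tf.constant([[9, 9]])             # one value per sampled position
#   pos = tf.constant([[1, 3]])
#   scatter_update(seq, upd, pos)           # -> ([[5, 9, 7, 9]], [[0, 1, 0, 1]])
# Updates aimed at position 0 are dropped and duplicated positions are applied once,
# exactly as the docstring above describes.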
def _get_candidates_mask(inputs: Inputs, vocab,
disallow_from_mask=None):
"""Returns a mask tensor of positions in the input that can be masked out."""
ignore_ids = [vocab["[SEP]"], vocab["[CLS]"], vocab["[MASK]"]]
candidates_mask = tf.ones_like(inputs.input_ids, tf.bool)
for ignore_id in ignore_ids:
candidates_mask &= tf.not_equal(inputs.input_ids, ignore_id)
candidates_mask &= tf.cast(inputs.input_mask, tf.bool)
if disallow_from_mask is not None:
candidates_mask &= ~disallow_from_mask
return candidates_mask
def mask(config, inputs, mask_prob, proposal_distribution=1.0,
disallow_from_mask=None, already_masked=None):
"""Implementation of dynamic masking. The optional arguments aren't needed for
BERT/ELECTRA and are from early experiments in "strategically" masking out
tokens instead of uniformly at random.
Args:
config: configure_pretraining.PretrainingConfig
inputs: pretrain_data.Inputs containing input input_ids/input_mask
mask_prob: percent of tokens to mask
proposal_distribution: for non-uniform masking can be a [B, L] tensor
of scores for masking each position.
disallow_from_mask: a boolean tensor of [B, L] of positions that should
not be masked out
already_masked: a boolean tensor of [B, N] of already masked-out tokens
for multiple rounds of masking
Returns: a pretrain_data.Inputs with masking added
"""
# Get the batch size, sequence length, and max masked-out tokens
N = config.max_predictions_per_seq
B, L = get_shape_list(inputs.input_ids)
# Find indices where masking out a token is allowed
vocab = tokenization.ElectraTokenizer(
config.vocab_file, do_lower_case=config.do_lower_case).get_vocab()
candidates_mask = _get_candidates_mask(inputs, vocab, disallow_from_mask)
# Set the number of tokens to mask out per example
num_tokens = tf.cast(tf.reduce_sum(inputs.input_mask, -1), tf.float32)
num_to_predict = tf.maximum(1, tf.minimum(
N, tf.cast(tf.round(num_tokens * mask_prob), tf.int32)))
masked_lm_weights = tf.cast(tf.sequence_mask(num_to_predict, N), tf.float32)
if already_masked is not None:
masked_lm_weights *= (1 - already_masked)
# Get a probability of masking each position in the sequence
candidate_mask_float = tf.cast(candidates_mask, tf.float32)
sample_prob = (proposal_distribution * candidate_mask_float)
sample_prob /= tf.reduce_sum(sample_prob, axis=-1, keepdims=True)
# Sample the positions to mask out
sample_prob = tf.stop_gradient(sample_prob)
sample_logits = tf.math.log(sample_prob)
masked_lm_positions = tf.random.categorical(
sample_logits, N, dtype=tf.int32)
masked_lm_positions *= tf.cast(masked_lm_weights, tf.int32)
# Get the ids of the masked-out tokens
shift = tf.expand_dims(L * tf.range(B), -1)
flat_positions = tf.reshape(masked_lm_positions + shift, [-1, 1])
masked_lm_ids = tf.gather_nd(tf.reshape(inputs.input_ids, [-1]),
flat_positions)
masked_lm_ids = tf.reshape(masked_lm_ids, [B, -1])
masked_lm_ids *= tf.cast(masked_lm_weights, tf.int32)
# Update the input ids
replace_with_mask_positions = masked_lm_positions * tf.cast(
tf.less(tf.random.uniform([B, N]), 0.85), tf.int32)
inputs_ids, _ = scatter_update(
inputs.input_ids, tf.fill([B, N], vocab["[MASK]"]),
replace_with_mask_positions)
return get_updated_inputs(
inputs,
input_ids=tf.stop_gradient(inputs_ids),
masked_lm_positions=masked_lm_positions,
masked_lm_ids=masked_lm_ids,
masked_lm_weights=masked_lm_weights
)
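# Editor's note (usage sketch, not part of the original module; mask_prob and
# max_predictions_per_seq are assumed to come from the pre-training config):
#   masked_inputs = mask(config, features_to_inputs(features), config.mask_prob)
# Note that only ~85% of the sampled positions are replaced by [MASK]
# (tf.less(..., 0.85) above); the rest keep their original token, but every sampled
# position is still recorded in masked_lm_positions/ids/weights.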
def unmask(inputs: Inputs):
unmasked_input_ids, _ = scatter_update(
inputs.input_ids, inputs.masked_lm_ids, inputs.masked_lm_positions)
return get_updated_inputs(inputs, input_ids=unmasked_input_ids)
def sample_from_softmax(logits, disallow=None):
if disallow is not None:
logits -= 1000.0 * disallow
uniform_noise = tf.random.uniform(
get_shape_list(logits), minval=0, maxval=1)
gumbel_noise = tf.cast(-tf.math.log(-tf.math.log(uniform_noise + 1e-9) + 1e-9), logits.dtype)
return tf.one_hot(tf.argmax(tf.nn.softmax(logits + gumbel_noise), -1,
output_type=tf.int32), logits.shape[-1])
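# Editor's note (illustrative sketch, not part of the original module): adding Gumbel
# noise to the logits and taking the argmax is the Gumbel-max trick, i.e. it draws an
# exact sample from softmax(logits); the sample is returned one-hot so it can be
# matmul'd with an embedding table downstream. A hypothetical call:
#   sampled = sample_from_softmax(generator_logits)           # [..., vocab_size] one-hot
#   sampled_ids = tf.argmax(sampled, -1, output_type=tf.int32)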
ENDC = "\033[0m"
COLORS = ["\033[" + str(n) + "m" for n in list(range(91, 97)) + [90]]
RED = COLORS[0]
BLUE = COLORS[3]
CYAN = COLORS[5]
GREEN = COLORS[1]
def print_tokens(inputs: Inputs, inv_vocab, updates_mask=None):
"""Pretty-print model inputs."""
pos_to_tokid = {}
for tokid, pos, weight in zip(
inputs.masked_lm_ids[0], inputs.masked_lm_positions[0],
inputs.masked_lm_weights[0]):
if weight == 0:
pass
else:
pos_to_tokid[pos] = tokid
text = ""
provided_update_mask = (updates_mask is | |
<reponame>mohanedmoh/TBS
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-argument
import mock
from pyfakefs import fake_filesystem_unittest
from py_utils import cloud_storage
import dependency_manager
from dependency_manager import exceptions
class DependencyManagerTest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.lp_info012 = dependency_manager.LocalPathInfo(
['path0', 'path1', 'path2'])
self.cloud_storage_info = dependency_manager.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')
self.dep_info = dependency_manager.DependencyInfo(
'dep', 'platform', 'config_file', local_path_info=self.lp_info012,
cloud_storage_info=self.cloud_storage_info)
self.setUpPyfakefs()
def tearDown(self):
self.tearDownPyfakefs()
# TODO(nednguyen): add a test that construct
# dependency_manager.DependencyManager from a list of DependencyInfo.
def testErrorInit(self):
with self.assertRaises(ValueError):
dependency_manager.DependencyManager(None)
with self.assertRaises(ValueError):
dependency_manager.DependencyManager('config_file?')
def testInitialUpdateDependencies(self):
dep_manager = dependency_manager.DependencyManager([])
# Empty BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
base_config_mock.IterDependencyInfo.return_value = iter([])
dep_manager._UpdateDependencies(base_config_mock)
self.assertFalse(dep_manager._lookup_dict)
# One dependency/platform in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep = 'dependency'
plat = 'platform'
dep_info.dependency = dep
dep_info.platform = plat
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat: dep_info}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info.Update.called)
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep = 'dependency'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info1,
plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep1 = 'dependency1'
dep2 = 'dependency2'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep1
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep1
dep_info2.platform = plat2
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep2
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep1: {plat1: dep_info1,
plat2: dep_info2},
dep2: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
def testFollowupUpdateDependenciesNoOverlap(self):
dep_manager = dependency_manager.DependencyManager([])
dep = 'dependency'
dep1 = 'dependency1'
dep2 = 'dependency2'
dep3 = 'dependency3'
plat1 = 'platform1'
plat2 = 'platform2'
plat3 = 'platform3'
dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_a.dependency = dep1
dep_info_a.platform = plat1
dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_b.dependency = dep1
dep_info_b.platform = plat2
dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_c.dependency = dep
dep_info_c.platform = plat1
start_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
# Empty BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
base_config_mock.IterDependencyInfo.return_value = iter([])
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(start_lookup_dict, dep_manager._lookup_dict)
# One dependency/platform in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep3
    dep_info.platform = plat3
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep3: {plat3: dep_info}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep1 = 'dependency1'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep3
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2},
dep3: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Ensure the testing data wasn't corrupted.
self.assertEqual(start_lookup_dict,
{dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}})
def testFollowupUpdateDependenciesWithCollisions(self):
dep_manager = dependency_manager.DependencyManager([])
dep = 'dependency'
dep1 = 'dependency1'
dep2 = 'dependency2'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_a.dependency = dep1
dep_info_a.platform = plat1
dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_b.dependency = dep1
dep_info_b.platform = plat2
dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_c.dependency = dep
dep_info_c.platform = plat1
start_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
# One dependency/platform.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
dep_info_a.Update.assert_called_once_with(dep_info)
self.assertFalse(dep_info.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
dep_info_a.reset_mock()
dep_info_b.reset_mock()
dep_info_c.reset_mock()
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep1
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
dep_info_c.Update.assert_called_once_with(dep_info1)
dep_info_a.reset_mock()
dep_info_b.reset_mock()
dep_info_c.reset_mock()
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep1 = 'dependency1'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep1
dep_info2.platform = plat1
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep2
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
self.assertFalse(dep_info_b.Update.called)
dep_info_a.Update.assert_called_once_with(dep_info1)
dep_info_c.Update.assert_called_once_with(dep_info2)
# Collision error.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
dep_info_a.Update.side_effect = ValueError
self.assertRaises(ValueError,
dep_manager._UpdateDependencies, base_config_mock)
# Ensure the testing data wasn't corrupted.
self.assertEqual(start_lookup_dict,
{dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}})
def testGetDependencyInfo(self):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(dep_manager._lookup_dict)
# No dependencies in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo('missing_dep',
'missing_plat'))
dep_manager._lookup_dict = {'dep1': {'plat1': 'dep_info11',
'plat2': 'dep_info12',
'plat3': 'dep_info13'},
'dep2': {'plat1': 'dep_info11',
'plat2': 'dep_info21',
'plat3': 'dep_info23',
'default': 'dep_info2d'},
'dep3': {'plat1': 'dep_info31',
'plat2': 'dep_info32',
'default': 'dep_info3d'}}
# Dependency not in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'missing_dep', 'missing_plat'))
# Dependency in the dependency manager, but not the platform. No default.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'dep1', 'missing_plat'))
# Dependency in the dependency manager, but not the platform, but a default
# exists.
self.assertEqual('dep_info2d', dep_manager._GetDependencyInfo(
'dep2', 'missing_plat'))
# Dependency and platform in the dependency manager. A default exists.
self.assertEqual('dep_info23', dep_manager._GetDependencyInfo(
'dep2', 'plat3'))
# Dependency and platform in the dependency manager. No default exists.
self.assertEqual('dep_info12', dep_manager._GetDependencyInfo(
'dep1', 'plat2'))
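  # Editor's note (illustrative sketch, not part of the original test file): the fallback
  # order exercised above is platform-specific entry first, then the 'default' entry,
  # then None, i.e. roughly
  #   plats = dep_manager._lookup_dict.get(dependency, {})
  #   info = plats.get(platform, plats.get('default'))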
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathUnititializedDependency(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
cs_path_mock.return_value = cs_path
# Empty lookup_dict
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.FetchPath('dep', 'plat_arch_x86')
# Non-empty lookup dict that doesn't contain the dependency we're looking
# for.
dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.FetchPath('dep', 'plat_arch_x86')
@mock.patch('os.path')
@mock.patch(
'dependency_manager.DependencyManager._GetDependencyInfo')
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathLocalFile(self, cs_path_mock, dep_info_mock, path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
dep_info = self.dep_info
cs_path_mock.return_value = cs_path
# The DependencyInfo returned should be passed through to LocalPath.
dep_info_mock.return_value = dep_info
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path exists.
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info},
'dep2': mock.MagicMock()}
self.fs.CreateFile('path1')
found_path = dep_manager.FetchPath('dep', 'platform')
self.assertEqual('path1', found_path)
self.assertFalse(cs_path_mock.call_args)
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathRemoteFile(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
def FakeCSPath():
self.fs.CreateFile(cs_path)
return cs_path
cs_path_mock.side_effect = FakeCSPath
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path doesn't exist, but cloud_storage_path is downloaded.
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
'plat1': mock.MagicMock()},
'dep2': {'plat2': mock.MagicMock()}}
found_path = dep_manager.FetchPath('dep', 'platform')
self.assertEqual(cs_path, found_path)
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathError(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path_mock.return_value = None
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
'plat1': mock.MagicMock()},
'dep2': {'plat2': mock.MagicMock()}}
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path doesn't exist, and cloud_storage path wasn't successfully
# found.
self.assertRaises(exceptions.NoPathFoundError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
dep_manager.FetchPath, 'dep', 'platform')
def testLocalPath(self):
dep_manager = dependency_manager.DependencyManager([])
# Empty lookup_dict
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.LocalPath('dep', 'plat')
def testLocalPathNoDependency(self):
# Non-empty lookup dict that doesn't contain the dependency we're looking
# for.
dep_manager = dependency_manager.DependencyManager([])
dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.LocalPath('dep', 'plat')
def testLocalPathExists(self):
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path exists.
dep_manager = dependency_manager.DependencyManager([])
dep_manager._lookup_dict = {'dependency' : {'platform': self.dep_info},
'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
self.fs.CreateFile('path1')
found_path = dep_manager.LocalPath('dependency', 'platform')
self.assertEqual('path1', found_path)
def testLocalPathMissingPaths(self):
# Non-empty lookup dict that contains the | |
amber:
print("Turning on build of AMBER parameter/coordinate files in order to obtain files for GROMACS via conversion.")
amber = True
#NOW GENERATE FINAL STORAGE FOR FILE NAMES/COMPONENT LISTS
#Now that we're building, we can generate the full set of components
#we will be using since we're done adding components
# List of all the smiles strings
self.smiles_strings = []
# List of all the number of monomers
self.n_monomers = []
# List of all the mole fractions
self.mole_fractions = []
# List of all the effective compound names. If the compond name is None
# than the compound label will be used in this list as compound name
self.labels = []
# The filling compound is a compound with None molecule number and None
# mole fraction. It is used to fill out the solution
self.filling_compound = None
# Lists of filenames related to gaff mol2 files, amber files and sdf
# file format
self.gaff_mol2_filenames = []
self.frcmod_filenames = []
self.inpcrd_filenames = []
self.prmtop_filenames = []
self.sdf_filenames = []
# Final strings for output filenames
self.mix_fname = ''
self.pdb_filename = ''
self.prmtop_filename = ''
self.inpcrd_filename = ''
self.top_filename = ''
self.gro_filename = ''
#BUILD
# Now begin building by building monomers
def build_monomers(self):
"""
Generate GAFF mol2 and frcmod files for each chemical
"""
# Filenames generation
for comp in self.component_list:
if comp.label:
mol2_filename = os.path.join(self.data_path_monomers, comp.label+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.label+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.label+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.label+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.label+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.label
else:
mol2_filename = os.path.join(self.data_path_monomers, comp.name+'.mol2')
frcmod_filename = os.path.join(self.data_path_monomers, comp.name+'.frcmod')
inpcrd_filename = os.path.join(self.data_path_monomers, comp.name+'.inpcrd')
prmtop_filename = os.path.join(self.data_path_monomers, comp.name+'.prmtop')
sdf_filename = os.path.join(self.data_path_monomers, comp.name+'.sdf')
self.mix_fname = self.mix_fname + '_' + comp.name
# Filling compound selection
if comp.number == None and comp.mole_fraction == None:
if self.filling_compound == None:
self.filling_compound = comp
self.mole_fractions.append(comp.mole_fraction)
else:
                    raise ValueError('Error: Two or more filling compounds have been specified')
# Number and mol fractions lists generation
if comp.number:
self.n_monomers.append(comp.number)
if comp.mole_fraction is not None:
self.mole_fractions.append(comp.mole_fraction)
# Lists of filenames generation
self.smiles_strings.append(comp.smiles)
self.gaff_mol2_filenames.append(mol2_filename)
self.frcmod_filenames.append(frcmod_filename)
self.inpcrd_filenames.append(inpcrd_filename)
self.prmtop_filenames.append(prmtop_filename)
self.sdf_filenames.append(sdf_filename)
if not (os.path.exists(mol2_filename) and os.path.exists(frcmod_filename)):
#Convert SMILES strings to mol2 and frcmod files for antechamber
openmoltools.openeye.smiles_to_antechamber(comp.smiles, mol2_filename, frcmod_filename, protonation = self.protonation_model)
#Correct the mol2 file partial atom charges to have a total net integer molecule charge
mol2f = parmed.formats.Mol2File
mol2f.write(parmed.load_file(mol2_filename).fix_charges(),mol2_filename, compress_whitespace=True)
#Generate amber coordinate and topology files for the unsolvated molecules
mol_name = os.path.basename(mol2_filename).split('.')[0]
openmoltools.amber.run_tleap(mol_name, mol2_filename, frcmod_filename, prmtop_filename, inpcrd_filename)
#Read Mol2 File and write SDF file
mol2tosdf.writeSDF(mol2_filename, sdf_filename, mol_name)
#Generate unique residue names for molecules in mol2 files
openmoltools.utils.randomize_mol2_residue_names(self.gaff_mol2_filenames)
def build_boxes(self):
"""
Build an initial box with packmol and use it to generate AMBER files
"""
def mole_fractions_to_n_monomers(self, density= 1 * grams/milliliter, cutoff=12*angstrom):
"""
This function is used to generate the number of molecules for
each compound in the solution from the mole fractions of each molecule.
Parameters
----------
density : openmm units
the solution density
cutoff : openmm units
the cutoff distance of the largest compound in the solution
Returns
-------
self.n_monomers : integer list
the list of molecule number for each compound in the solution
size : float
the edge of the box volume
"""
oechem = import_("openeye.oechem")
# Calculate the maximum atomic distance in a molecule
def max_dist_mol(mol):
max_dist = 0.0
coords = mol.GetCoords() # Are the coords always in A in mol2 file?
for i in range(0, mol.NumAtoms()):
crdi = np.array([coords[i][0], coords[i][1], coords[i][2]])
for j in range(i+1, mol.NumAtoms()):
crdj = np.array([coords[j][0], coords[j][1], coords[j][2]])
dist = np.linalg.norm(crdi-crdj)
if dist > max_dist:
max_dist = dist
return max_dist * angstrom
# The sum of all the mole fractions
sum_fractions = sum([i for i in self.mole_fractions if i != None])
if sum_fractions > 1.0:
raise ValueError('Error: The total molar fraction is greater than 1.0')
if sum_fractions == 1.0 and self.filling_compound:
raise ValueError('Error: The total molar fraction is 1.0 and it is not possible to add any filling compound to the solution, but a filling compound was specified')
if sum_fractions < 1.0 and not self.filling_compound:
raise ValueError('Error: The total molar fraction is less than 1.0 and no filling compound (i.e. compound with unspecified mole fraction) is provided')
if self.filling_compound:
self.filling_compound.mole_fraction = 1.0 - sum_fractions
self.mole_fractions = [i if i != None else (1.0 - sum_fractions) for i in self.mole_fractions]
max_dist_mols = 0.0 * angstrom
delta_volume = 0.0 * angstrom**3
sum_wgt_frac = 0.0 * grams/mole
for i in range(0, len(self.sdf_filenames)):
istream = oechem.oemolistream(self.sdf_filenames[i])#gaff_mol2_files give wrong wgt because not sybyl format!
mol = oechem.OEMol()
if not oechem.OEReadMolecule(istream, mol):
raise IOError('Error: It was not possible to create the OpenEye molecule object by reading the file: %s' % self.gaff_mol2_filenames[i])
# Molecular weight
wgt = oechem.OECalculateMolecularWeight(mol) * grams/mole
if self.component_list[i].mole_fraction == 0.0:
delta_volume = oechem.OECalculateMolecularWeight(mol) * angstrom**3
sum_wgt_frac = sum_wgt_frac + wgt * self.component_list[i].mole_fraction
max_dist= max_dist_mol(mol)
if max_dist > max_dist_mols:
max_dist_mols = max_dist
cube_length = ((max_dist_mols + 2*cutoff)**3 + delta_volume)**(1.0/3.0)
n_monomers = []
# n_i = Volume * Density * mole_fraction_i/sum_j(wgt_j * mole_fraction_j)
self.n_monomers = [int(round(AVOGADRO_CONSTANT_NA * comp.mole_fraction * density * cube_length**3 / sum_wgt_frac)) \
if comp.mole_fraction !=0 else 1 for comp in self.component_list]
return self.n_monomers, cube_length
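        # Editor's note (illustrative arithmetic, not part of the original module): the
        # formula above, n_i = N_A * x_i * rho * V / sum_j(w_j * x_j), just converts the
        # target box volume into a molecule count per component. For an equimolar
        # water/methanol box (x = 0.5 each, w ~= 18 and 32 g/mol) the denominator is
        # 0.5*18 + 0.5*32 = 25 g/mol, so both components get the same count,
        # n_i = N_A * 0.5 * rho * V / (25 g/mol).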
if not self.gaff_mol2_filenames:
raise ValueError('The list of gaff mol2 molecules is empty')
if self.n_monomers and self.mole_fractions:
print (self.n_monomers, self.mole_fractions)
raise ValueError('Error: For different compounds it is not possible to mix mole_fractions and number of molecules')
# The solution has been specified by using number of molecules
if self.n_monomers:
if self.filling_compound:
raise ValueError('Error: The filling compound cannot be mixed with components specified by defining the number of molecules')
size = openmoltools.packmol.approximate_volume_by_density(self.smiles_strings, self.n_monomers)
mdtraj_components = [md.load(mol2) for mol2 in self.gaff_mol2_filenames]
# Standardize water to avoid issue where waters are not recognized by MDTraj unless they have specific names
for component in mdtraj_components:
openmoltools.packmol.standardize_water(component)
packed_trj = openmoltools.packmol.pack_box( mdtraj_components, self.n_monomers, box_size = size)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] + ''.join(['_'+str(i) for i in self.n_monomers])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
# The solutions has been specified by using mole fractions
elif self.mole_fractions:
n_monomers, size = mole_fractions_to_n_monomers(self)
            # WARNING: The size estimated by mole_fractions_to_n_monomers
            # underestimates the volume calculated with openmoltools, so for now
            # we recompute the box size with openmoltools below. If the volume is
            # underestimated, Packmol apparently struggles to converge and
            # introduces extra molecules into the best solution it finds (bug?)
size = openmoltools.packmol.approximate_volume_by_density(self.smiles_strings, self.n_monomers)
mdtraj_components = [md.load(mol2) for mol2 in self.gaff_mol2_filenames]
# Standardize water to avoid issue where waters are not recognized by MDTraj unless they have specific names
for component in mdtraj_components:
openmoltools.packmol.standardize_water(component)
packed_trj = openmoltools.packmol.pack_box(mdtraj_components, n_monomers, box_size = size)
self.labels = self.mix_fname[1:].split('_')
self.mix_fname = self.mix_fname[1:] +''.join(['_'+str(i) for i in self.mole_fractions if i is not None])
self.pdb_filename = os.path.join(self.data_path_packmol, self.mix_fname+'.pdb')
packed_trj.save(self.pdb_filename)
return
def convert_to_gromacs(self, solute_index):
"""From AMBER-format prmtop and crd files, generate final solvated
GROMACS topology and coordinate files. Ensure that the desired "solute" (as per
solute_index) has a single monomer treated via a unique residue name to allow
treatment as a solute separate from other residues of the same name (if
desired). The solute will be given residue name "solute" Also, check to see if
there are "WAT" residues present, in which case tleap will have re-ordered
them to the end of the data file. If so, update data structures accordingly
and handle conversion appropriately.
Notes
-----
Currently, this function ensures that - after AMBER conversion reorders
water molecules with residue names 'WAT' to occur last in the resulting
parameter/coordinate files - the internal data structures are updated to
have the correct order in the relevant lists (labels, smiles_strings,
n_monomers). If for some reason GROMACS conversion were removed, these
would need to be updated elsewhere. (Probably this should be done anyway,
as this is not really a GROMACS issue.)
"""
# Read in AMBER format parameter/coordinate file and convert in gromacs
gromacs_topology = parmed.load_file(self.prmtop_filename, self.inpcrd_filename )
# Split the topology into components and check that we have the right number of components
components = gromacs_topology.split()
| |
22, 5):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==4:
c = []
for i in range(2, 23, 5):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==5:
c = []
for i in range(3, 24, 5):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==6:
c = []
for i in range(4, 25, 5):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==7:
c = []
for i in range(5):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==8:
c = []
for i in range(5, 10):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==9:
c = []
for i in range(10, 15):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==10:
c = []
for i in range(15, 20):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
if j==11:
c = []
                    for i in range(20, 25):
if P[i] in p1:
c.append(1)
else:
c.append(0)
else:
if not 0 in c: pc.pop(pc.index(j))
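# Editor's note (refactoring sketch, not part of the original script): assuming P is the
# flat 5x5 board and p1 the numbers marked so far, the j==2..6 branches above check the
# five stride-5 lines (columns of a row-major board) and j==7..11 the five contiguous
# blocks of five (rows). Assuming j is always present in pc when these branches run, the
# chain could be written as:
#   lines = [range(c, c + 21, 5) for c in range(5)] + \
#           [range(5 * r, 5 * r + 5) for r in range(5)]
#   for j, idxs in enumerate(lines, start=2):
#       if all(P[i] in p1 for i in idxs):
#           pc.remove(j)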
################################################################################
# Main Program:--->
print('\n\n\n\n\n\n####################################################################')
print(' \\ // // ||=== || ||=== ||===|| ||\\ //|| ||===')
print(' \\ // \\ // ||== || || || || || \\// || ||==')
print(' \\ \\ ||=== ||=== ||=== ||===|| || || ||===')
sleep(0.6)
print()
print(' ==||== ||===||')
print(' || || ||')
print(' || ||===||')
sleep(0.6)
print()
print(' |> || |\ | /-- |--|')
print(' |> || | \| |__T |__|')
print('####################################################################\n\n\n\n\n\n\n\n')
print(' version: 0.2.20')
sleep(2)
while True:
if login():
try:
con = connect(host = 'localhost', user = 'root', password = '<PASSWORD>', database = 'Users')
csr = con.cursor()
csr.execute('SELECT Fname, Lname FROM PlrData WHERE UserID = \''+getUID+'\'')
fn, ln = csr.fetchone()
ex = ''
while True:
print('Do you want to Sign In as',fn,ln,'(Y/n): ',end = '')
ex = input().lower()
if not ex in ['y', 'n']:
print('Invalid Input!!!')
else:break
if ex == 'n': logout()
else: break
finally:
con.close()
else:
print('\n\nSELECT PRESS KEY')
print('------ ---------')
print('Sign In 1')
print('Sign Up 2')
print('Exit 3')
key = ''
while True:
key = input('Enter the key: ')
if not key in ['1', '2', '3']:
print('Invalid Input!!!')
else: break
if key == '1':
signIn()
break
elif key == '2':
signUp()
elif key == '3':
print('\n\nExiting...')
sleep(1)
break
if login():
try:
con = connect(host = 'localhost', user = 'root', password = '<PASSWORD>', database = 'Users')
csr = con.cursor()
csr.execute('SELECT Fname FROM PlrData WHERE UserID = \''+getUID+'\'')
name = csr.fetchone()[0]
print('\n\n Hey,', name)
finally:
csr.close()
con.close()
while login():
print('\n\n####################################################################')
print(' ||\\ //|| //\\ || ||\\ || ||\\ //|| ||=== ||\\ || || ||')
print(' || \\// || //==\\ || || \\ || || \\// || ||== || \\ || || ||')
print(' || || // \\ || || \\|| || || ||=== || \\|| ||===||')
print('####################################################################\n\n')
print('SELECT PRESS KEY')
print('------ ---------')
print('Play 1')
print('Profile 2')
print('Options 3')
print('Exit 4')
key = ''
while True:
key = input('Enter the key: ')
if not key in ['1', '2', '3', '4']:
print('Invalid Input!!!')
else: break
if key == '1':
try:
con = connect(host = 'localhost', user = 'root', password = '<PASSWORD>', database = 'Users')
csr = con.cursor()
rst, rds = play()
if rst == -1:
csr.execute('UPDATE PlrData SET TMD = TMD+1 WHERE log = 1')
csr.execute('SELECT FDR, LDR FROM PlrData WHERE log = 1')
f, l = csr.fetchone()
if f == 0 and l == 0:
csr.execute('UPDATE PlrData SET FDR = '+str(rds)+' WHERE log = 1')
csr.execute('UPDATE PlrData SET LDR = '+str(rds)+' WHERE log = 1')
else:
if rds < f:
csr.execute('UPDATE PlrData SET FDR = '+str(rds)+' WHERE log = 1')
elif rds > l:
csr.execute('UPDATE PlrData SET LDR = '+str(rds)+' WHERE log = 1')
elif rst:
csr.execute('UPDATE PlrData SET TMW = TMW+1 WHERE log = 1')
csr.execute('SELECT FWR, LWR FROM PlrData WHERE log = 1')
f, l = csr.fetchone()
if f == 0 and l == 0:
csr.execute('UPDATE PlrData SET FWR = '+str(rds)+' WHERE log = 1')
csr.execute('UPDATE PlrData SET LWR = '+str(rds)+' WHERE log = 1')
else:
if rds < f:
csr.execute('UPDATE PlrData SET FWR = '+str(rds)+' WHERE log = 1')
elif rds > l:
csr.execute('UPDATE PlrData SET LWR = '+str(rds)+' WHERE log = 1')
else:
csr.execute('UPDATE PlrData SET TML = TML+1 WHERE log = 1')
csr.execute('SELECT FLR, LLR FROM PlrData WHERE log = 1')
f, l = csr.fetchone()
if f == 0 and l == 0:
csr.execute('UPDATE PlrData SET FLR = '+str(rds)+' WHERE log = 1')
csr.execute('UPDATE PlrData SET LLR = '+str(rds)+' WHERE log = 1')
else:
if rds < f:
csr.execute('UPDATE PlrData SET FLR = '+str(rds)+' WHERE log = 1')
elif rds > l:
csr.execute('UPDATE PlrData SET LLR = '+str(rds)+' WHERE log = 1')
csr.execute('UPDATE PlrData SET TMP = TMP+1 WHERE log = 1')
input('Press ENTER to go Back To Main Menu...')
finally:
con.commit()
con.close()
elif key == '2':
print('\n\n####################################################################')
print(' ||==|| ||==// ||===|| ||=== || || ||===')
print(' ||==|| ||=// || || ||== || || ||==')
print(' || || \\ ||===|| || || ||=== ||===')
print('####################################################################\n\n')
print('SELECT PRESS KEY')
print('------ ---------')
print('Your Card 1')
print('Top Winners 2')
print('Top Loser 3')
print('Top Drawer 4')
print('Player\'s Card 5')
try:
con = connect(host = 'localhost', user = 'root', password = '<PASSWORD>', database = 'Users')
csr = con.cursor()
pk = ''
while True:
pk = input('Enter the key: ')
if not pk in ['1', '2', '3', '4', '5']:
print('Invalid Input!!!')
else: break
if pk == '1':
csr.execute('SELECT Fname, Lname, DOB, TMP, TMW, TML, TMD, FWR, LWR, FLR, LLR, FDR, LDR FROM PlrData WHERE log = 1')
FD = csr.fetchone()
ml = 0
for i in FD:
if ml < len(str(i)):ml = len(str(i))+1
for i in range(len(FD)):
print('+'+'-'*(ml+16), end = '+\n|')
if i == 0:
print('First Name'.ljust(15)+'|'+str(FD[i]).rjust(ml),end='|\n')
elif i == 1:
print('Last Name'.ljust(15)+'|'+str(FD[i]).rjust(ml),end='|\n')
elif i == 2:
dob = str(FD[i])
# reverse the stored YYYY-MM-DD date into DD-MM-YYYY for display
dob = '-'.join(dob.split('-')[::-1])
print('Date of Birth'.ljust(15)+'|'+str(dob).rjust(ml),end='|\n')
elif i == 3:
print('Matches Played'.ljust(15)+'|'+str(FD[i]).center(ml),end='|\n')
elif i == 4:
print('Matches Won'.ljust(15)+'|'+str(FD[i]).center(ml),end='|\n')
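# Win ratio = TMW / TMP as a truncated integer percentage; FD[i-1] here is the
# total matches played, and the falsy check guards against dividing by zero
# when no matches have been recorded yet.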
wr = ''
if not FD[i-1]: wr = '0 %'
else:
wr = str((FD[i]/FD[i-1])*100)
wr = wr[:wr.index('.')]+' %'
print('+'+'-'*(ml+16), end='+\n|')
print('Win Ratio'.ljust(15)+'|'+wr.center(ml),end='|\n')
elif i == 5:
print('Matches Lost'.ljust(15)+'|'+str(FD[i]).center(ml),end='|\n')
lr = ''
if not FD[i-2]: lr = '0 %'
else:
lr = str((FD[i]/FD[i-2])*100)
lr = lr[:lr.index('.')]+' %'
print('+'+'-'*(ml+16), end='+\n|')
print('Loss Ratio'.ljust(15)+'|'+lr.center(ml),end='|\n')
elif i == 6:
print('Matches Draw'.ljust(15)+'|'+str(FD[i]).center(ml),end='|\n')
dr = ''
if not FD[i-3]: dr = '0 %'
else:
dr = str((FD[i]/FD[i-3])*100)
dr = dr[:dr.index('.')]+' %'
print('+'+'-'*(ml+16), end='+\n|')
print('Draw Ratio'.ljust(15)+'|'+dr.center(ml),end='|\n')
elif i == 7:
print('Fastest Win'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
elif i == 8:
print('Longest Win'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
elif i == 9:
print('Fastest Loss'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
elif i == 10:
print('Longest Loss'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
elif i == 11:
print('Fastest Draw'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
elif i == 12:
print('Longest Draw'.ljust(15)+'|'+(str(FD[i])+' Rounds').rjust(ml),end='|\n')
else:
print('+'+'-'*(ml+16)+'+\n')
input('Press ENTER to go Back To Main Menu ...')
elif pk == '2':
csr.execute('SELECT Fname, Lname, TMP, TMW FROM PlrData ORDER BY TMW DESC')
dts = csr.fetchall()
n = 11
tp = 7
tw = 4
for i in dts:
if len(i[0]+' '+i[1]) > n: n = len(i[0]+' '+i[1])
if len(str(i[2])) > tp: tp = len(str(i[2]))
if len(str(i[3])) > tw: tw = len(str(i[3]))
print('+'+'-'*(n+12+tp+tw),end = '+\n|')
print('Player Name'.center(n)+'|'+'Matches'.center(tp)+'|'+'Wins'.center(tw)+'|Win Ratio|')
print('+'+'-'*(n+12+tp+tw)+'+')
for i in dts:
wr = ''
if i[2] == 0: wr = '0 %'
else:
wr = str((i[3]/i[2])*100)
wr = wr[:wr.index('.')]+' %'
print('|'+(i[0]+' '+i[1]).ljust(n)+'|'+str(i[2]).center(tp)+'|'+str(i[3]).center(tw)+'|'+wr.rjust(9), end = '|\n')
print('+'+'-'*(n+12+tp+tw)+'+')
else:input('Press ENTER to go Back To Main Menu ...')
elif pk == '3':
csr.execute('SELECT Fname, Lname, TMP, TML FROM PlrData ORDER BY TML DESC')
dts = csr.fetchall()
n = 11
tp = 7
tw = 5
for i in dts:
if len(i[0]+' '+i[1]) > n: n = len(i[0]+' '+i[1])
if len(str(i[2])) > tp: tp = len(str(i[2]))
if len(str(i[3])) > tw: tw = len(str(i[3]))
raise Exception("Expected notes_ to be a str, received: {}".format(type(notes_)))
if series_ is not None and not isinstance(series_, (bytes, str)):
raise Exception("Expected series_ to be a str, received: {}".format(type(series_)))
if size_ is not None and not isinstance(size_, int):
raise Exception("Expected size_ to be a int, received: {}".format(type(size_)))
if started_ is not None and not isinstance(started_, (bytes, str)):
raise Exception("Expected started_ to be a str, received: {}".format(type(started_)))
if stored_ is not None and not isinstance(stored_, (bytes, str)):
raise Exception("Expected stored_ to be a str, received: {}".format(type(stored_)))
if version_ is not None and not isinstance(version_, (dict, Number)):
raise Exception("Expected version_ to be a Number, received: {}".format(type(version_)))
self.ca_cert = ca_cert_
self.ca_private_key = ca_private_key_
self.checksum = checksum_
self.checksum_format = checksum_format_
self.filename = filename_
self.finished = finished_
self.hostname = hostname_
self.id_ = id__
self.machine = machine_
self.model = model_
self.notes = notes_
self.series = series_
self.size = size_
self.started = started_
self.stored = stored_
self.version = version_
self.unknown_fields = unknown_fields
class BackupsRemoveArgs(Type):
_toSchema = {'ids': 'ids'}
_toPy = {'ids': 'ids'}
def __init__(self, ids=None, **unknown_fields):
'''
ids : typing.Sequence[str]
'''
ids_ = ids
# Validate arguments against known Juju API types.
if ids_ is not None and not isinstance(ids_, (bytes, str, list)):
raise Exception("Expected ids_ to be a Sequence, received: {}".format(type(ids_)))
self.ids = ids_
self.unknown_fields = unknown_fields
class Binary(Type):
_toSchema = {'arch': 'Arch', 'build': 'Build', 'major': 'Major', 'minor': 'Minor', 'number': 'Number', 'patch': 'Patch', 'series': 'Series', 'tag': 'Tag'}
_toPy = {'Arch': 'arch', 'Build': 'build', 'Major': 'major', 'Minor': 'minor', 'Number': 'number', 'Patch': 'patch', 'Series': 'series', 'Tag': 'tag'}
def __init__(self, arch=None, build=None, major=None, minor=None, number=None, patch=None, series=None, tag=None, **unknown_fields):
'''
arch : str
build : int
major : int
minor : int
number : Number
patch : int
series : str
tag : str
'''
arch_ = arch
build_ = build
major_ = major
minor_ = minor
number_ = Number.from_json(number) if number else None
patch_ = patch
series_ = series
tag_ = tag
# Validate arguments against known Juju API types.
if arch_ is not None and not isinstance(arch_, (bytes, str)):
raise Exception("Expected arch_ to be a str, received: {}".format(type(arch_)))
if build_ is not None and not isinstance(build_, int):
raise Exception("Expected build_ to be a int, received: {}".format(type(build_)))
if major_ is not None and not isinstance(major_, int):
raise Exception("Expected major_ to be a int, received: {}".format(type(major_)))
if minor_ is not None and not isinstance(minor_, int):
raise Exception("Expected minor_ to be a int, received: {}".format(type(minor_)))
if number_ is not None and not isinstance(number_, (dict, Number)):
raise Exception("Expected number_ to be a Number, received: {}".format(type(number_)))
if patch_ is not None and not isinstance(patch_, int):
raise Exception("Expected patch_ to be a int, received: {}".format(type(patch_)))
if series_ is not None and not isinstance(series_, (bytes, str)):
raise Exception("Expected series_ to be a str, received: {}".format(type(series_)))
if tag_ is not None and not isinstance(tag_, (bytes, str)):
raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
self.arch = arch_
self.build = build_
self.major = major_
self.minor = minor_
self.number = number_
self.patch = patch_
self.series = series_
self.tag = tag_
self.unknown_fields = unknown_fields
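# Hedged sketch: these generated wrapper classes deserialize plain dicts via
# from_json() (used internally above, e.g. Number.from_json), presumably mapping
# wire keys through _toPy as the paired _toSchema/_toPy tables suggest. The
# field values below are illustrative assumptions only.
def _example_binary_from_json():
    raw = {'Arch': 'amd64', 'Build': 0, 'Major': 2, 'Minor': 9,
           'Patch': 7, 'Series': 'focal', 'Tag': ''}
    binary = Binary.from_json(raw)
    # attribute names follow the constructor above
    return binary.arch, binary.major, binary.series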
class Block(Type):
_toSchema = {'id_': 'id', 'message': 'message', 'tag': 'tag', 'type_': 'type'}
_toPy = {'id': 'id_', 'message': 'message', 'tag': 'tag', 'type': 'type_'}
def __init__(self, id_=None, message=None, tag=None, type_=None, **unknown_fields):
'''
id_ : str
message : str
tag : str
type_ : str
'''
id__ = id_
message_ = message
tag_ = tag
type__ = type_
# Validate arguments against known Juju API types.
if id__ is not None and not isinstance(id__, (bytes, str)):
raise Exception("Expected id__ to be a str, received: {}".format(type(id__)))
if message_ is not None and not isinstance(message_, (bytes, str)):
raise Exception("Expected message_ to be a str, received: {}".format(type(message_)))
if tag_ is not None and not isinstance(tag_, (bytes, str)):
raise Exception("Expected tag_ to be a str, received: {}".format(type(tag_)))
if type__ is not None and not isinstance(type__, (bytes, str)):
raise Exception("Expected type__ to be a str, received: {}".format(type(type__)))
self.id_ = id__
self.message = message_
self.tag = tag_
self.type_ = type__
self.unknown_fields = unknown_fields
class BlockDevice(Type):
_toSchema = {'busaddress': 'BusAddress', 'devicelinks': 'DeviceLinks', 'devicename': 'DeviceName', 'filesystemtype': 'FilesystemType', 'hardwareid': 'HardwareId', 'inuse': 'InUse', 'label': 'Label', 'mountpoint': 'MountPoint', 'size': 'Size', 'uuid': 'UUID', 'wwn': 'WWN'}
_toPy = {'BusAddress': 'busaddress', 'DeviceLinks': 'devicelinks', 'DeviceName': 'devicename', 'FilesystemType': 'filesystemtype', 'HardwareId': 'hardwareid', 'InUse': 'inuse', 'Label': 'label', 'MountPoint': 'mountpoint', 'Size': 'size', 'UUID': 'uuid', 'WWN': 'wwn'}
def __init__(self, busaddress=None, devicelinks=None, devicename=None, filesystemtype=None, hardwareid=None, inuse=None, label=None, mountpoint=None, size=None, uuid=None, wwn=None, **unknown_fields):
'''
busaddress : str
devicelinks : typing.Sequence[str]
devicename : str
filesystemtype : str
hardwareid : str
inuse : bool
label : str
mountpoint : str
size : int
uuid : str
wwn : str
'''
busaddress_ = busaddress
devicelinks_ = devicelinks
devicename_ = devicename
filesystemtype_ = filesystemtype
hardwareid_ = hardwareid
inuse_ = inuse
label_ = label
mountpoint_ = mountpoint
size_ = size
uuid_ = uuid
wwn_ = wwn
# Validate arguments against known Juju API types.
if busaddress_ is not None and not isinstance(busaddress_, (bytes, str)):
raise Exception("Expected busaddress_ to be a str, received: {}".format(type(busaddress_)))
if devicelinks_ is not None and not isinstance(devicelinks_, (bytes, str, list)):
raise Exception("Expected devicelinks_ to be a Sequence, received: {}".format(type(devicelinks_)))
if devicename_ is not None and not isinstance(devicename_, (bytes, str)):
raise Exception("Expected devicename_ to be a str, received: {}".format(type(devicename_)))
if filesystemtype_ is not None and not isinstance(filesystemtype_, (bytes, str)):
raise Exception("Expected filesystemtype_ to be a str, received: {}".format(type(filesystemtype_)))
if hardwareid_ is not None and not isinstance(hardwareid_, (bytes, str)):
raise Exception("Expected hardwareid_ to be a str, received: {}".format(type(hardwareid_)))
if inuse_ is not None and not isinstance(inuse_, bool):
raise Exception("Expected inuse_ to be a bool, received: {}".format(type(inuse_)))
if label_ is not None and not isinstance(label_, (bytes, str)):
raise Exception("Expected label_ to be a str, received: {}".format(type(label_)))
if mountpoint_ is not None and not isinstance(mountpoint_, (bytes, str)):
raise Exception("Expected mountpoint_ to be a str, received: {}".format(type(mountpoint_)))
if size_ is not None and not isinstance(size_, int):
raise Exception("Expected size_ to be a int, received: {}".format(type(size_)))
if uuid_ is not None and not isinstance(uuid_, (bytes, str)):
raise Exception("Expected uuid_ to be a str, received: {}".format(type(uuid_)))
if wwn_ is not None and not isinstance(wwn_, (bytes, str)):
raise Exception("Expected wwn_ to be a str, received: {}".format(type(wwn_)))
self.busaddress = busaddress_
self.devicelinks = devicelinks_
self.devicename = devicename_
self.filesystemtype = filesystemtype_
self.hardwareid = hardwareid_
self.inuse = inuse_
self.label = label_
self.mountpoint = mountpoint_
self.size = size_
self.uuid = uuid_
self.wwn = wwn_
self.unknown_fields = unknown_fields
class BlockDeviceResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : BlockDevice
'''
error_ = Error.from_json(error) if error else None
result_ = BlockDevice.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, BlockDevice)):
raise Exception("Expected result_ to be a BlockDevice, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
class BlockDeviceResults(Type):
_toSchema = {'results': 'results'}
_toPy = {'results': 'results'}
def __init__(self, results=None, **unknown_fields):
'''
results : typing.Sequence[~BlockDeviceResult]
'''
results_ = [BlockDeviceResult.from_json(o) for o in results or []]
# Validate arguments against known Juju API types.
if results_ is not None and not isinstance(results_, (bytes, str, list)):
raise Exception("Expected results_ to be a Sequence, received: {}".format(type(results_)))
self.results = results_
self.unknown_fields = unknown_fields
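# Hedged sketch of the nested deserialization performed by the constructors
# above: BlockDeviceResults builds BlockDeviceResult objects from raw dicts,
# which in turn wrap BlockDevice/Error via from_json. Field values are
# illustrative assumptions only.
def _example_block_device_results():
    raw_results = [{'result': {'DeviceName': 'sda', 'Size': 42, 'InUse': True},
                    'error': None}]
    results = BlockDeviceResults(results=raw_results)
    first = results.results[0]  # a BlockDeviceResult instance
    return first.result.devicename, first.result.size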
class BlockResult(Type):
_toSchema = {'error': 'error', 'result': 'result'}
_toPy = {'error': 'error', 'result': 'result'}
def __init__(self, error=None, result=None, **unknown_fields):
'''
error : Error
result : Block
'''
error_ = Error.from_json(error) if error else None
result_ = Block.from_json(result) if result else None
# Validate arguments against known Juju API types.
if error_ is not None and not isinstance(error_, (dict, Error)):
raise Exception("Expected error_ to be a Error, received: {}".format(type(error_)))
if result_ is not None and not isinstance(result_, (dict, Block)):
raise Exception("Expected result_ to be a Block, received: {}".format(type(result_)))
self.error = error_
self.result = result_
self.unknown_fields = unknown_fields
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Data to be sent in the Link ATTACH frame.
:type link_properties: dict
:param link_credit: The sender Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
:type link_credit: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(self, target, auth=None, client_name=None, debug=False, msg_timeout=0, **kwargs):
target = target if isinstance(target, address.Address) else address.Target(target)
self._msg_timeout = msg_timeout
self._pending_messages = []
self._message_sender = None
self._shutdown = None
# Sender and Link settings
self._send_settle_mode = kwargs.pop('send_settle_mode', None) or constants.SenderSettleMode.Unsettled
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._link_properties = kwargs.pop('link_properties', None)
self._link_credit = kwargs.pop('link_credit', None)
# AMQP object settings
self.sender_type = sender.MessageSender
super(SendClient, self).__init__(target, auth=auth, client_name=client_name, debug=debug, **kwargs)
def _client_ready(self):
"""Determine whether the client is ready to start sending messages.
To be ready, the connection must be open and authentication complete,
The Session, Link and MessageSender must be open and in non-errored
states.
:returns: bool
:raises: ~uamqp.errors.AMQPConnectionError if the MessageSender
goes into an error state.
"""
# pylint: disable=protected-access
if not self._message_sender:
self._message_sender = self.sender_type(
self._session, self._name, self._remote_address,
name='sender-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
send_settle_mode=self._send_settle_mode,
max_message_size=self._max_message_size,
link_credit=self._link_credit,
properties=self._link_properties,
encoding=self._encoding)
self._message_sender.open()
return False
elif self._message_sender._state == constants.MessageSenderState.Error:
raise errors.AMQPConnectionError(
"Message Sender Client was unable to open. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
elif self._message_sender._state != constants.MessageSenderState.Open:
return False
return True
def _client_run(self):
"""MessageSender Link is now open - perform message send
on all pending messages.
Will return True if operation successful and client can remain open for
further work.
:returns: bool
"""
# pylint: disable=protected-access
for message in self._pending_messages[:]:
if message.state in [constants.MessageState.Complete, constants.MessageState.Failed]:
try:
self._pending_messages.remove(message)
except ValueError:
pass
elif message.state == constants.MessageState.WaitingToBeSent:
message.state = constants.MessageState.WaitingForAck
try:
current_time = self._counter.get_current_ms()
elapsed_time = (current_time - message.idle_time)/1000
if self._msg_timeout > 0 and elapsed_time > self._msg_timeout:
message._on_message_sent(constants.MessageSendResult.Timeout)
else:
timeout = self._msg_timeout - elapsed_time if self._msg_timeout > 0 else 0
self._message_sender.send_async(message, timeout=timeout)
except Exception as exp: # pylint: disable=broad-except
message._on_message_sent(constants.MessageSendResult.Error, error=exp)
self._connection.work()
return True
def close(self):
"""Close down the client. No further messages
can be sent and the client cannot be re-opened.
All pending, unsent messages will be cleared.
"""
if self._message_sender:
self._message_sender.destroy()
self._message_sender = None
super(SendClient, self).close()
self._pending_messages = []
def queue_message(self, messages):
"""Add a message to the send queue.
No further action will be taken until either SendClient.wait()
or SendClient.send_all_messages() has been called.
The client does not need to be open yet for messages to be added
to the queue.
:param messages: A message to send. This can either be a single instance
of ~uamqp.Message, or multiple messages wrapped in an instance
of ~uamqp.BatchMessage.
:type messages: ~uamqp.Message or ~uamqp.BatchMessage
"""
for message in messages.gather():
message.idle_time = self._counter.get_current_ms()
self._pending_messages.append(message)
def send_message(self, messages, close_on_done=False):
"""Send a single message or batched message.
:param messages: A message to send. This can either be a single instance
of ~uamqp.Message, or multiple messages wrapped in an instance
of ~uamqp.BatchMessage.
:type messages: ~uamqp.Message or ~uamqp.BatchMessage
:param close_on_done: Close the client once the message is sent. Default is `False`.
:type close_on_done: bool
:raises: ~uamqp.errors.MessageSendFailed if message fails to send after retry policy
is exhausted.
"""
batch = messages.gather()
pending_batch = []
for message in batch:
message.idle_time = self._counter.get_current_ms()
self._pending_messages.append(message)
pending_batch.append(message)
self.open()
try:
while any([m for m in pending_batch if m.state not in constants.DONE_STATES]):
self.do_work()
except:
raise
else:
failed = [m for m in pending_batch if m.state == constants.MessageState.Failed]
if any(failed):
raise errors.MessageSendFailed("Failed to send message.")
finally:
if close_on_done:
self.close()
def messages_pending(self):
"""Check whether the client is holding any unsent
messages in the queue.
:returns: bool
"""
return bool(self._pending_messages)
def wait(self):
"""Run the client until all pending message in the queue
have been processed.
"""
while self.messages_pending():
self.do_work()
def send_all_messages(self, close_on_done=True):
"""Send all pending messages in the queue. This will return a list
of the send result of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:returns: list[~uamqp.constants.MessageState]
"""
self.open()
try:
messages = self._pending_messages[:]
self.wait()
except:
raise
else:
results = [m.state for m in messages]
return results
finally:
if close_on_done:
self.close()
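# Hedged usage sketch based only on the methods defined above (queue_message /
# wait / close). The uamqp.Message import and the target/auth arguments are
# assumptions taken from the docstring references, not from this fragment.
def _example_send_usage(target_uri, sas_auth):
    from uamqp import Message
    client = SendClient(target_uri, auth=sas_auth, msg_timeout=30)
    client.queue_message(Message(b"hello"))  # queued only; nothing is sent yet
    client.wait()                            # drain the pending-message queue
    client.close()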
class ReceiveClient(AMQPClient):
"""An AMQP client for receiving messages.
:param source: The source AMQP service endpoint. This can either be the URI as
a string or a ~uamqp.Source object.
:type source: str, bytes or ~uamqp.Source
:param auth: Authentication for the connection. If none is provided, SASL Anonymous
authentication will be used.
:type auth: ~uamqp.authentication.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param timeout: A timeout in milliseconds. The receiver will shut down if no
new messages are received after the specified timeout. If set to 0, the receiver
will never time out and will continue to listen. The default is 0.
:type timeout: int
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Data to be sent in the Link ATTACH frame.
:type link_properties: dict
:param prefetch: The receiver Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
The default is 300.
:type prefetch: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(self, source, auth=None, client_name=None, debug=False, timeout=0, **kwargs):
source = source if isinstance(source, address.Address) else address.Source(source)
self._timeout = timeout
self._message_receiver = None
self._last_activity_timestamp = None
self._was_message_received = False
self._message_received_callback = None
self._received_messages = None
# Receiver and Link settings
self._receive_settle_mode = kwargs.pop('receive_settle_mode', None) or constants.ReceiverSettleMode.PeekLock
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._prefetch = kwargs.pop('prefetch', None) or 300
self._link_properties = kwargs.pop('link_properties', None)
""" ietf_netconf_acm
NETCONF Access Control Model.
Copyright (c) 2012 IETF Trust and the persons identified as
authors of the code. All rights reserved.
Redistribution and use in source and binary forms, with or
without modification, is permitted pursuant to, and subject
to the license terms contained in, the Simplified BSD
License set forth in Section 4.c of the IETF Trust's
Legal Provisions Relating to IETF Documents
(http\://trustee.ietf.org/license\-info).
This version of this YANG module is part of RFC 6536; see
the RFC itself for full legal notices.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class ActionType(Enum):
"""
ActionType (Enum Class)
Action taken by the server when a particular
rule matches.
.. data:: permit = 0
Requested action is permitted.
.. data:: deny = 1
Requested action is denied.
"""
permit = Enum.YLeaf(0, "permit")
deny = Enum.YLeaf(1, "deny")
class Nacm(Entity):
"""
Parameters for NETCONF Access Control Model.
.. attribute:: enable_nacm
Enables or disables all NETCONF access control enforcement. If 'true', then enforcement is enabled. If 'false', then enforcement is disabled
**type**\: bool
**default value**\: true
.. attribute:: read_default
Controls whether read access is granted if no appropriate rule is found for a particular read request
**type**\: :py:class:`ActionType <ydk.models.ietf.ietf_netconf_acm.ActionType>`
**default value**\: permit
.. attribute:: write_default
Controls whether create, update, or delete access is granted if no appropriate rule is found for a particular write request
**type**\: :py:class:`ActionType <ydk.models.ietf.ietf_netconf_acm.ActionType>`
**default value**\: deny
.. attribute:: exec_default
Controls whether exec access is granted if no appropriate rule is found for a particular protocol operation request
**type**\: :py:class:`ActionType <ydk.models.ietf.ietf_netconf_acm.ActionType>`
**default value**\: permit
.. attribute:: enable_external_groups
Controls whether the server uses the groups reported by the NETCONF transport layer when it assigns the user to a set of NACM groups. If this leaf has the value 'false', any group names reported by the transport layer are ignored by the server
**type**\: bool
**default value**\: true
.. attribute:: denied_operations
Number of times since the server last restarted that a protocol operation request was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: denied_data_writes
Number of times since the server last restarted that a protocol operation request to alter a configuration datastore was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: denied_notifications
Number of times since the server last restarted that a notification was dropped for a subscription because access to the event type was denied
**type**\: int
**range:** 0..4294967295
**mandatory**\: True
.. attribute:: groups
NETCONF Access Control Groups
**type**\: :py:class:`Groups <ydk.models.ietf.ietf_netconf_acm.Nacm.Groups>`
.. attribute:: rule_list
An ordered collection of access control rules
**type**\: list of :py:class:`RuleList <ydk.models.ietf.ietf_netconf_acm.Nacm.RuleList>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
super(Nacm, self).__init__()
self._top_entity = None
self.yang_name = "nacm"
self.yang_parent_name = "ietf-netconf-acm"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("groups", ("groups", Nacm.Groups)), ("rule-list", ("rule_list", Nacm.RuleList))])
self._leafs = OrderedDict([
('enable_nacm', (YLeaf(YType.boolean, 'enable-nacm'), ['bool'])),
('read_default', (YLeaf(YType.enumeration, 'read-default'), [('ydk.models.ietf.ietf_netconf_acm', 'ActionType', '')])),
('write_default', (YLeaf(YType.enumeration, 'write-default'), [('ydk.models.ietf.ietf_netconf_acm', 'ActionType', '')])),
('exec_default', (YLeaf(YType.enumeration, 'exec-default'), [('ydk.models.ietf.ietf_netconf_acm', 'ActionType', '')])),
('enable_external_groups', (YLeaf(YType.boolean, 'enable-external-groups'), ['bool'])),
('denied_operations', (YLeaf(YType.uint32, 'denied-operations'), ['int'])),
('denied_data_writes', (YLeaf(YType.uint32, 'denied-data-writes'), ['int'])),
('denied_notifications', (YLeaf(YType.uint32, 'denied-notifications'), ['int'])),
])
self.enable_nacm = None
self.read_default = None
self.write_default = None
self.exec_default = None
self.enable_external_groups = None
self.denied_operations = None
self.denied_data_writes = None
self.denied_notifications = None
self.groups = Nacm.Groups()
self.groups.parent = self
self._children_name_map["groups"] = "groups"
self.rule_list = YList(self)
self._segment_path = lambda: "ietf-netconf-acm:nacm"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Nacm, ['enable_nacm', 'read_default', 'write_default', 'exec_default', 'enable_external_groups', 'denied_operations', 'denied_data_writes', 'denied_notifications'], name, value)
class Groups(Entity):
"""
NETCONF Access Control Groups.
.. attribute:: group
One NACM Group Entry. This list will only contain configured entries, not any entries learned from any transport protocols
**type**\: list of :py:class:`Group <ydk.models.ietf.ietf_netconf_acm.Nacm.Groups.Group>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
super(Nacm.Groups, self).__init__()
self.yang_name = "groups"
self.yang_parent_name = "nacm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("group", ("group", Nacm.Groups.Group))])
self._leafs = OrderedDict()
self.group = YList(self)
self._segment_path = lambda: "groups"
self._absolute_path = lambda: "ietf-netconf-acm:nacm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Nacm.Groups, [], name, value)
class Group(Entity):
"""
One NACM Group Entry. This list will only contain
configured entries, not any entries learned from
any transport protocols.
.. attribute:: name (key)
Group name associated with this entry
**type**\: str
**pattern:** [^\\\*].\*
.. attribute:: user_name
Each entry identifies the username of a member of the group associated with this entry
**type**\: list of str
**length:** 1..18446744073709551615
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
super(Nacm.Groups.Group, self).__init__()
self.yang_name = "group"
self.yang_parent_name = "groups"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('user_name', (YLeafList(YType.str, 'user-name'), ['str'])),
])
self.name = None
self.user_name = []
self._segment_path = lambda: "group" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ietf-netconf-acm:nacm/groups/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Nacm.Groups.Group, ['name', 'user_name'], name, value)
class RuleList(Entity):
"""
An ordered collection of access control rules.
.. attribute:: name (key)
Arbitrary name assigned to the rule\-list
**type**\: str
**length:** 1..18446744073709551615
.. attribute:: group
List of administrative groups that will be assigned the associated access rights defined by the 'rule' list. The string '\*' indicates that all groups apply to the entry
**type**\: union of the below types:
**type**\: list of str
**pattern:** \\\*
**type**\: list of str
**pattern:** [^\\\*].\*
.. attribute:: rule
One access control rule. Rules are processed in user\-defined order until a match is found. A rule matches if 'module\-name', 'rule\-type', and 'access\-operations' match the request. If a rule matches, the 'action' leaf determines if access is granted or not
**type**\: list of :py:class:`Rule <ydk.models.ietf.ietf_netconf_acm.Nacm.RuleList.Rule>`
"""
_prefix = 'nacm'
_revision = '2012-02-22'
def __init__(self):
super(Nacm.RuleList, self).__init__()
self.yang_name = "rule-list"
self.yang_parent_name = "nacm"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['name']
self._child_classes = OrderedDict([("rule", ("rule", Nacm.RuleList.Rule))])
self._leafs = OrderedDict([
('name', (YLeaf(YType.str, 'name'), ['str'])),
('group', (YLeafList(YType.str, 'group'), ['str','str'])),
])
self.name = None
self.group = []
self.rule = YList(self)
self._segment_path = lambda: "rule-list" + "[name='" + str(self.name) + "']"
self._absolute_path = lambda: "ietf-netconf-acm:nacm/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Nacm.RuleList, ['name', 'group'], name, value)
class Rule(Entity):
"""
One access control rule.
Rules are processed in user\-defined order until a match is
found. A rule matches if 'module\-name', 'rule\-type', and
'access\-operations' match the request. If a rule
matches, the 'action' leaf determines if access is granted
or not.
.. attribute:: name (key)
Arbitrary name assigned to the rule
**type**\: str
**length:** 1..18446744073709551615
.. attribute:: module_name
Name of the module associated with this rule. This leaf matches if it has the value '\*' or if the object being accessed is defined in the module with the specified module name
**type**\: union of the below types:
**type**\: str
**pattern:** \\\*
**type**\: str
**default value**\: *
.. attribute:: rpc_name
This leaf matches if it has the value '\*' or if its value equals the requested protocol operation name
**type**\: union of the below types:
**type**\: str
**pattern:** \\\*
**type**\: str
.. attribute:: notification_name
This leaf matches if it has the value '\*' or if its value equals the requested notification name
**type**\: union of the below types:
**type**\: str
**pattern:** \\\*
**type**\: str
.. attribute:: path
Data Node Instance Identifier associated with the data node controlled by this rule. Configuration data or state data instance identifiers start with a top\-level data node. | |
< -180:
dv.append(diff + 360)
else:
dv.append(diff)
dr = np.array(end_d['r']) - np.array(beg_d['r'])
return (np.array(dv), dr)
def _add_scalar_data(self, data):
"""Add scalar values to dataset"""
array_id = self._mesh_dataset.point_data.add_array(data)
self._mesh_dataset.point_data.get_array(array_id).name = array_id
self._mesh_dataset.point_data.update()
# build visualization pipeline
with warnings.catch_warnings(record=True):
pipe = mlab.pipeline.set_active_attribute(
self._mesh_dataset, point_scalars=array_id, figure=self._f)
# The new data-source is added to the wrong figure by default
# (a Mayavi bug??)
if pipe.parent not in self._f.children:
self._f.add_child(pipe.parent)
self._mesh_clones[array_id] = pipe.parent
return array_id, pipe
def _remove_scalar_data(self, array_id):
"""Removes scalar data"""
self._mesh_clones.pop(array_id).remove()
self._mesh_dataset.point_data.remove_array(array_id)
def _add_vector_data(self, vectors, vector_values, fmin, fmid, fmax,
scale_factor_norm, vertices, vector_alpha, lut):
vertices = slice(None) if vertices is None else vertices
x, y, z = np.array(self._geo_mesh.data.points.data)[vertices].T
vector_alpha = min(vector_alpha, 0.9999999)
with warnings.catch_warnings(record=True): # HasTraits
quiver = mlab.quiver3d(
x, y, z, vectors[:, 0], vectors[:, 1], vectors[:, 2],
scalars=vector_values, colormap='hot', vmin=fmin,
vmax=fmax, figure=self._f, opacity=vector_alpha)
# Enable backface culling
quiver.actor.property.backface_culling = True
quiver.mlab_source.update()
# Compute scaling for the glyphs
quiver.glyph.glyph.scale_factor = (scale_factor_norm *
vector_values.max())
# Scale colormap used for the glyphs
l_m = quiver.parent.vector_lut_manager
l_m.load_lut_from_list(lut / 255.)
l_m.data_range = np.array([fmin, fmax])
return quiver
def _remove_vector_data(self, glyphs):
if glyphs is not None:
glyphs.parent.parent.remove()
def add_overlay(self, old):
"""Add an overlay to the overlay dict from a file or array"""
array_id, mesh = self._add_scalar_data(old.mlab_data)
if old.pos_lims is not None:
with warnings.catch_warnings(record=True):
pos_thresh = threshold_filter(mesh, low=old.pos_lims[0])
pos = mlab.pipeline.surface(
pos_thresh, colormap="YlOrRd", figure=self._f,
vmin=old.pos_lims[1], vmax=old.pos_lims[2],
reset_zoom=False)
pos.actor.property.backface_culling = False
pos_bar = mlab.scalarbar(pos, nb_labels=5)
pos_bar.reverse_lut = True
pos_bar.scalar_bar_representation.position = (0.53, 0.01)
pos_bar.scalar_bar_representation.position2 = (0.42, 0.09)
pos_bar.label_text_property.color = self._fg_color
else:
pos = pos_bar = None
if old.neg_lims is not None:
with warnings.catch_warnings(record=True):
neg_thresh = threshold_filter(mesh, up=old.neg_lims[0])
neg = mlab.pipeline.surface(
neg_thresh, colormap="PuBu", figure=self._f,
vmin=old.neg_lims[1], vmax=old.neg_lims[2],
reset_zoom=False)
neg.actor.property.backface_culling = False
neg_bar = mlab.scalarbar(neg, nb_labels=5)
neg_bar.scalar_bar_representation.position = (0.05, 0.01)
neg_bar.scalar_bar_representation.position2 = (0.42, 0.09)
neg_bar.label_text_property.color = self._fg_color
else:
neg = neg_bar = None
return OverlayDisplay(self, array_id, pos, pos_bar, neg, neg_bar)
@verbose
def add_data(self, array, fmin, fmid, fmax, thresh, lut, colormap, alpha,
colorbar, layer_id, smooth_mat, magnitude, magnitude_max,
scale_factor, vertices, vector_alpha):
"""Add data to the brain"""
# Calculate initial data to plot
if array.ndim == 1:
array_plot = array
elif array.ndim == 2:
array_plot = array[:, 0]
elif array.ndim == 3:
assert array.shape[1] == 3 # should always be true
assert magnitude is not None
assert scale_factor is not None
array_plot = magnitude[:, 0]
else:
raise ValueError("data has to be 1D, 2D, or 3D")
vector_values = array_plot
if smooth_mat is not None:
array_plot = smooth_mat * array_plot
# Copy and byteswap to deal with Mayavi bug
array_plot = _prepare_data(array_plot)
array_id, pipe = self._add_scalar_data(array_plot)
scale_factor_norm = None
if array.ndim == 3:
scale_factor_norm = scale_factor / magnitude_max
vectors = array[:, :, 0].copy()
glyphs = self._add_vector_data(
vectors, vector_values, fmin, fmid, fmax,
scale_factor_norm, vertices, vector_alpha, lut)
else:
glyphs = None
del scale_factor
mesh = pipe.parent
if thresh is not None:
if array_plot.min() >= thresh:
warn("Data min is greater than threshold.")
else:
with warnings.catch_warnings(record=True):
pipe = threshold_filter(pipe, low=thresh, figure=self._f)
with warnings.catch_warnings(record=True):
surf = mlab.pipeline.surface(
pipe, colormap=colormap, vmin=fmin, vmax=fmax,
opacity=float(alpha), figure=self._f, reset_zoom=False)
surf.actor.property.backface_culling = False
# apply look up table if given
if lut is not None:
l_m = surf.module_manager.scalar_lut_manager
l_m.load_lut_from_list(lut / 255.)
# Get the original colormap table
orig_ctable = \
surf.module_manager.scalar_lut_manager.lut.table.to_array().copy()
# Get the colorbar
if colorbar:
bar = mlab.scalarbar(surf)
bar.label_text_property.color = self._fg_color
bar.scalar_bar_representation.position2 = .8, 0.09
else:
bar = None
self.data[layer_id] = dict(
array_id=array_id, mesh=mesh, glyphs=glyphs,
scale_factor_norm=scale_factor_norm)
return surf, orig_ctable, bar, glyphs
def add_annotation(self, annot, ids, cmap):
"""Add an annotation file"""
# Add scalar values to dataset
array_id, pipe = self._add_scalar_data(ids)
with warnings.catch_warnings(record=True):
surf = mlab.pipeline.surface(pipe, name=annot, figure=self._f,
reset_zoom=False)
surf.actor.property.backface_culling = False
# Set the color table
l_m = surf.module_manager.scalar_lut_manager
l_m.load_lut_from_list(cmap / 255.)
# Set the brain attributes
return dict(surface=surf, name=annot, colormap=cmap, brain=self,
array_id=array_id)
def add_label(self, label, label_name, color, alpha):
"""Add an ROI label to the image"""
from matplotlib.colors import colorConverter
array_id, pipe = self._add_scalar_data(label)
with warnings.catch_warnings(record=True):
surf = mlab.pipeline.surface(pipe, name=label_name, figure=self._f,
reset_zoom=False)
surf.actor.property.backface_culling = False
color = colorConverter.to_rgba(color, alpha)
cmap = np.array([(0, 0, 0, 0,), color])
l_m = surf.module_manager.scalar_lut_manager
# for some reason (traits?) using `load_lut_from_list` here does
# not work (.data_range needs to be tweaked in this case),
# but setting the table directly does:
l_m.lut.table = np.round(cmap * 255).astype(np.uint8)
return array_id, surf
def add_morphometry(self, morph_data, colormap, measure,
min, max, colorbar):
"""Add a morphometry overlay to the image"""
array_id, pipe = self._add_scalar_data(morph_data)
with warnings.catch_warnings(record=True):
surf = mlab.pipeline.surface(
pipe, colormap=colormap, vmin=min, vmax=max, name=measure,
figure=self._f, reset_zoom=False)
# Get the colorbar
if colorbar:
bar = mlab.scalarbar(surf)
bar.label_text_property.color = self._fg_color
bar.scalar_bar_representation.position2 = .8, 0.09
else:
bar = None
# Fill in the morphometry dict
return dict(surface=surf, colorbar=bar, measure=measure, brain=self,
array_id=array_id)
def add_foci(self, foci_coords, scale_factor, color, alpha, name):
"""Add spherical foci, possibly mapping to displayed surf"""
# Create the visualization
with warnings.catch_warnings(record=True): # traits
points = mlab.points3d(
foci_coords[:, 0], foci_coords[:, 1], foci_coords[:, 2],
np.ones(foci_coords.shape[0]), name=name, figure=self._f,
scale_factor=(10. * scale_factor), color=color, opacity=alpha)
return points
def add_contour_overlay(self, scalar_data, min=None, max=None,
n_contours=7, line_width=1.5, lut=None,
colorbar=True):
"""Add a topographic contour overlay of the positive data"""
array_id, pipe = self._add_scalar_data(scalar_data)
with warnings.catch_warnings(record=True):
thresh = threshold_filter(pipe, low=min)
surf = mlab.pipeline.contour_surface(
thresh, contours=n_contours, line_width=line_width,
reset_zoom=False)
if lut is not None:
l_m = surf.module_manager.scalar_lut_manager
l_m.load_lut_from_list(lut / 255.)
# Set the colorbar and range correctly
with warnings.catch_warnings(record=True): # traits
bar = mlab.scalarbar(surf, nb_colors=n_contours,
nb_labels=n_contours + 1)
bar.data_range = min, max
bar.label_text_property.color = self._fg_color
bar.scalar_bar_representation.position2 = .8, 0.09
if not colorbar:
bar.visible = False
# Set up a dict attribute with pointers at important things
return dict(surface=surf, colorbar=bar, brain=self, array_id=array_id)
def add_text(self, x, y, text, name, color=None, opacity=1.0):
""" Add a text to the visualization"""
color = self._fg_color if color is None else color
with warnings.catch_warnings(record=True):
text = mlab.text(x, y, text, name=name, color=color,
opacity=opacity, figure=self._f)
return text
def remove_data(self, layer_id):
"""Remove data shown with .add_data()"""
data = self.data.pop(layer_id)
self._remove_scalar_data(data['array_id'])
self._remove_vector_data(data['glyphs'])
def set_data(self, layer_id, values, vectors=None, vector_values=None):
"""Set displayed data values and vectors."""
data = self.data[layer_id]
self._mesh_dataset.point_data.get_array(
data['array_id']).from_array(values)
# avoid "AttributeError: 'Scene' object has no attribute 'update'"
data['mesh'].update()
if vectors is not None:
q = data['glyphs']
# extract params that will change after calling .update()
l_m = q.parent.vector_lut_manager
data_range = np.array(l_m.data_range)
lut = l_m.lut.table.to_array().copy()
# Update glyphs
q.mlab_source.vectors = vectors
q.mlab_source.scalars = vector_values
q.mlab_source.update()
# Update changed parameters, and glyph scaling
q.glyph.glyph.scale_factor = (data['scale_factor_norm'] *
values.max())
l_m.load_lut_from_list(lut / 255.)
l_m.data_range = data_range
def _orient_lights(self):
"""Set lights to come from same direction relative to brain."""
if self.hemi == "rh":
if self._f.scene is not None and \
self._f.scene.light_manager is not None:
for light in self._f.scene.light_manager.lights:
light.azimuth *= -1
def update_surf(self):
"""Update surface mesh after mesh coordinates change."""
with warnings.catch_warnings(record=True): # traits
self._geo_mesh.update()
for mesh in self._mesh_clones.values():
mesh.update()
class OverlayData(object):
"""Encapsulation of statistical neuroimaging overlay viz data"""
def __init__(self, scalar_data, min, max, sign):
if scalar_data.min() >= 0:
sign = "pos"
elif scalar_data.max() <= 0:
sign = "neg"
if sign in ["abs", "pos"]:
# Figure out the correct threshold to avoid TraitErrors
# This seems like not the cleanest way to do this
pos_max = np.max((0.0, np.max(scalar_data)))
if pos_max < min:
thresh_low = pos_max
else:
thresh_low = min
self.pos_lims = [thresh_low, min, max]
else:
self.pos_lims = None
if sign in ["abs", "neg"]:
# Figure out the correct threshold to avoid TraitErrors
# This seems even less clean due to negative convolutedness
neg_min = np.min((0.0, np.min(scalar_data)))
if neg_min > -min:
thresh_up = neg_min
else:
thresh_up = -min
self.neg_lims = [thresh_up, -max, -min]
else:
self.neg_lims = None
# Byte swap copy; due to mayavi bug
self.mlab_data = _prepare_data(scalar_data)
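# Hedged sketch of the sign/threshold behaviour above: purely positive data is
# classified as "pos", so only pos_lims is populated and neg_lims stays None.
def _example_overlay_limits():
    data = np.array([0.0, 2.0, 5.0])
    overlay = OverlayData(data, min=1.0, max=4.0, sign="abs")
    return overlay.pos_lims, overlay.neg_lims  # ([1.0, 1.0, 4.0], None)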
class OverlayDisplay():
"""Encapsulation of overlay viz plotting"""
def __init__(self, brain, array_id, pos, pos_bar, neg, neg_bar):
self._brain = brain
self._array_id = array_id
self.pos = pos
self.pos_bar = pos_bar
self.neg = neg
self.neg_bar = neg_bar
def remove(self):
self._brain._remove_scalar_data(self._array_id)
if self.pos_bar is not None:
self.pos_bar.visible = False
if self.neg_bar is not None:
self.neg_bar.visible = False
class TimeViewer(HasTraits):
"""TimeViewer object providing a GUI for visualizing time series
Useful for visualizing M/EEG inverse solutions on Brain object(s).
Parameters
----------
brain : Brain (or list of Brain)
brain(s) to control
"""
# Nested | |
from __future__ import print_function
import json
import os
from faker import Faker
from flask import Flask, request
from flask_restful import Api
from nose.tools import eq_
from jsonpath_ng import jsonpath, parse
from halo_flask.base_util import BaseUtil
from halo_flask.flask.utilx import Util, status
from halo_flask.flask.mixinx import AbsApiMixinX,PerfMixinX
from halo_flask.flask.viewsx import PerfLinkX
from halo_flask.exceptions import ApiError
from halo_flask.logs import log_json
from halo_flask import saga
from halo_flask.const import HTTPChoice
from halo_flask.apis import CnnApi,GoogleApi,TstApi
from halo_flask.flask.viewsx import Resource,AbsBaseLinkX
from halo_flask.request import HaloContext
from halo_flask.apis import load_api_config
from halo_flask.ssm import set_app_param_config,get_app_param_config,set_host_param_config
from halo_flask.flask.viewsx import load_global_data
import unittest
#6,7,9923,9941 failing
fake = Faker()
app = Flask(__name__)
api = Api(app)
from halo_flask.request import HaloRequest
from halo_flask.response import HaloResponse
class A1(AbsApiMixinX):
def set_back_api(self,halo_request, foi=None):
if not foi:#not in seq
if not halo_request.sub_func:#not in bq
if halo_request.request.method == HTTPChoice.delete.value:
return CnnApi(halo_request.context,HTTPChoice.delete.value)
return super(A1,self).set_back_api(halo_request,foi)
def extract_json(self,halo_request, back_response, seq=None):
if seq == None:#no event
if halo_request.request.method == HTTPChoice.get.value:#method type
return {"tst_get":"good"}
if halo_request.request.method == HTTPChoice.delete.value:#method type
return {"tst_delete":"good"}
else:#in event
if halo_request.request.method == HTTPChoice.put.value:#method type
if seq == '1':
return {"tst_put":"good1"}
if seq == '2':
return {"tst_put":"good2"}
if halo_request.request.method == HTTPChoice.post.value:#method type
if seq == '1':
return {"tst_post":"good1"}
if seq == '2':
return {"tst_post":"good2"}
if halo_request.request.method == HTTPChoice.patch.value:#method type
return {"tst_patch":"good"}
class A3(AbsApiMixinX):
def do_operation(self, halo_request):
# 1. validate input params
self.validate_req(halo_request)
# 2. run pre conditions
self.validate_pre(halo_request)
# 3. processing engine abc
# 4. Build the payload target response structure which is Compliant
payload = self.create_resp_payload(halo_request, {})
# 5. setup headers for reply
headers = self.set_resp_headers(halo_request, halo_request.request.headers)
# 6. build json and add to halo response
halo_response = self.create_response(halo_request, payload, headers)
# 7. post condition
self.validate_post(halo_request, halo_response)
# 8. do filter
self.do_filter(halo_request,halo_response)
# 9. return json response
return halo_response
def do_filter(self, halo_request, halo_response): #
request_filter = self.get_request_filter(halo_request)
request_filter.do_filter(halo_request, halo_response)
class A2(Resource, A1, AbsBaseLinkX):
def set_api_headers_deposit(self,halo_request, seq=None, dict=None):
return super(A2,self).set_api_headers(halo_request, seq, dict)
def set_api_vars_deposit(self,halo_request, seq=None, dict=None):
return super(A2,self).set_api_vars(halo_request, seq, dict)
def set_api_auth_deposit(self,halo_request, seq=None, dict=None):
return super(A2,self).set_api_auth(halo_request, seq, dict)
def set_api_data_deposit(self,halo_request, seq=None, dict=None):
return super(A2,self).set_api_data(halo_request, seq, dict)
def execute_api_deposit(self,halo_request, back_api, back_vars, back_headers, back_auth, back_data=None, seq=None, dict=None):
return super(A2,self).execute_api(halo_request, back_api, back_vars, back_headers, back_auth, back_data, seq, dict)
def extract_json_deposit(self,halo_request, back_response, seq=None):
if seq == None:#no event
if halo_request.request.method == HTTPChoice.get.value:#method type
return {"tst_get_deposit":"good"}
if halo_request.request.method == HTTPChoice.delete.value:#method type
return {"tst_delete_deposit":"good"}
else:#in event
if halo_request.request.method == HTTPChoice.put.value:#method type
if seq == '1':
return {"tst_put_deposit":"good1"}
if seq == '2':
return {"tst_put_deposit":"good2"}
if halo_request.request.method == HTTPChoice.post.value:#method type
if seq == '1':
return {"tst_post_deposit":"good1"}
if seq == '2':
return {"tst_post_deposit":"good2"}
if halo_request.request.method == HTTPChoice.patch.value:#method type
return {"tst_patch_deposit":"good"}
def create_resp_payload(self, halo_request, dict_back_json):
if dict_back_json:
dict_back_json = {
"employees": [
{
"id": 1,
"name": "Pankaj",
"salary": "10000"
},
{
"name": "David",
"salary": "5000",
"id": 2
}
]
}
dict_back_json1 = {
"store": {
"book": [{
"category": "reference",
"author": "<NAME>",
"title": "Sayings of the Century",
"price": 8.95
}, {
"category": "fiction",
"author": "<NAME>",
"title": "Sword of Honour",
"price": 12.99
}, {
"category": "fiction",
"author": "<NAME>",
"title": "Moby Dick",
"isbn": "0-553-21311-3",
"price": 8.99
}, {
"category": "fiction",
"author": "<NAME>",
"title": "The Lord of the Rings",
"isbn": "0-395-19395-8",
"price": 22.99
}],
"bicycle": {
"color": "red",
"price": 19.95
}
},
"expensive": 10
}
return super(A2,self).create_resp_payload(halo_request, dict_back_json)
class P1(PerfMixinX):
pass
class P2(PerfLinkX):
pass
from halo_flask.flask.filter import RequestFilter,RequestFilterClear
class TestFilter(RequestFilter):
def augment_event_with_headers_and_data(self,event, halo_request,halo_response):
event.put(HaloContext.items.get(HaloContext.CORRELATION), halo_request.request.headers[HaloContext.items.get(HaloContext.CORRELATION)])
return event
class TestRequestFilterClear(RequestFilterClear):
def run(self,event):
print("insert_events_to_repository " + str(event.serialize()))
class CAContext(HaloContext):
TESTER = "TESTER"
HaloContext.items[TESTER] = "x-tester-id"
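# Hedged note: CAContext registers an extra context key so the framework can
# carry a custom "x-tester-id" header alongside the built-in ones, i.e.
# HaloContext.items[CAContext.TESTER] resolves to "x-tester-id".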
def get_host_name():
if 'HALO_HOST' in os.environ:
return os.environ['HALO_HOST']
else:
return 'HALO_HOST'
class TestUserDetailTestCase(unittest.TestCase):
"""
Tests /users detail operations.
"""
def setUp(self):
#self.url = 'http://127.0.0.1:8000/?abc=def'
#self.perf_url = 'http://127.0.0.1:8000/perf'
#app.config['TESTING'] = True
#app.config['WTF_CSRF_ENABLED'] = False
#app.config['DEBUG'] = False
#app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(app.config['BASEDIR'], TEST_DB)
#self.app = app#.test_client()
#app.config.from_pyfile('../settings.py')
app.config.from_object('settings')
self.a1 = A1()
self.a2 = A2()
self.a3 = A3()
self.p1 = P1()
self.p2 = P2()
def test_000_start(self):
from halo_flask.const import LOC
app.config['ENV_TYPE'] = LOC
app.config['SSM_TYPE'] = "AWS"
app.config['FUNC_NAME'] = "FUNC_NAME"
#app.config['API_CONFIG'] =
app.config['AWS_REGION'] = 'us-east-1'
with app.test_request_context(method='GET', path='/?abc=def'):
try:
load_api_config(app.config['ENV_TYPE'], app.config['SSM_TYPE'], app.config['FUNC_NAME'],
app.config['API_CONFIG'])
except Exception as e:
eq_(e.__class__.__name__, "NoApiClassException")
def test_00_start(self):
app.config['SSM_TYPE'] = "AWS"
app.config['AWS_REGION'] = 'us-east-1'
with app.test_request_context(method='GET', path='/?abc=def'):
try:
HALO_HOST = get_host_name()
params = {}
params["url"] = set_host_param_config(HALO_HOST)
set_app_param_config(app.config['SSM_TYPE'],params)
except Exception as e:
eq_(e.__class__.__name__, "NoApiClassException")
def test_01_start(self):
app.config['SSM_TYPE'] = "AWS"
app.config['AWS_REGION'] = 'us-east-1'
app.config['FUNC_NAME'] = "halo_flask"
with app.test_request_context(method='GET', path='/?abc=def'):
try:
val = get_app_param_config(app.config['SSM_TYPE'], app.config['FUNC_NAME'],"url")
print("val="+str(val))
except Exception as e:
eq_(e.__class__.__name__, "NoApiClassException")
def test_0_start(self):
with app.test_request_context(method='GET', path='/?abc=def'):
try:
if 'INIT_DATA_MAP' in app.config and 'INIT_CLASS_NAME' in app.config:
data_map = app.config['INIT_DATA_MAP']
class_name = app.config['INIT_CLASS_NAME']
load_global_data(class_name, data_map)
except Exception as e:
eq_(e.__class__.__name__, "NoApiClassException")
def test_1_get_request_returns_exception(self):
with app.test_request_context(method='GET', path='/?abc=def'):
try:
response = self.a1.process_get(request, {})
assert False
except Exception as e:
eq_(e.__class__.__name__, "NoApiClassException")
def test_2_delete_request_returns_dict(self):
with app.test_request_context(method='DELETE', path='/?abc=def'):
response = self.a1.process_delete(request, {})
eq_(response.payload, {"tst_delete":"good"})
def test_3_put_request_returns_dict(self):
with app.test_request_context(method='PUT', path='/?abc=def'):
response = self.a1.process_put(request, {})
eq_(response.payload, {'1': {'tst_put': 'good1'}, '2': {'tst_put': 'good2'}})
def test_4_post_request_returns_a_given_string(self):
with app.test_request_context(method='POST', path='/?abc=def'):
response = self.a1.process_post(request, {})
print("response=" + str(response.payload))
eq_(response.code, status.HTTP_201_CREATED)
eq_(response.payload, {'$.BookHotelResult': {'tst_post': 'good1'}, '$.BookFlightResult': {'tst_post': 'good2'}, '$.BookRentalResult': None})
def test_5_patch_request_returns_a_given_string(self):
with app.test_request_context(method='PATCH', path='/?abc=def'):
response = self.a1.process_patch(request, {})
print("response=" + str(response.payload))
eq_(response.code, status.HTTP_200_OK)
eq_(response.payload, {'$.BookHotelResult': {'tst_patch': 'good'}, '$.BookFlightResult': {'tst_patch': 'good'}, '$.BookRentalResult': {'tst_patch': 'good'}})
def test_6_api_request_returns_a_CircuitBreakerError(self):
app.config['CIRCUIT_BREAKER'] = True
with app.test_request_context(method='GET', path='/?a=b'):
api = CnnApi(Util.get_halo_context(request))
timeout = Util.get_timeout(request)
try:
response = api.get(timeout)
assert False
except ApiError as e:
#eq_(e.status_code, status.HTTP_403_NOT_FOUND)
eq_(e.__class__.__name__,"CircuitBreakerError")
def test_7_api_request_returns_a_given_CircuitBreakerError2(self):
app.config['CIRCUIT_BREAKER'] = True
with app.test_request_context(method='GET', path='/?a=b'):
api = TstApi(Util.get_halo_context(request))
timeout = Util.get_timeout(request)
try:
response = api.get(timeout)
assert False
except ApiError as e:
#eq_(e.status_code, status.HTTP_403_NOT_FOUND)
eq_(e.__class__.__name__,"ApiError")
def test_8_api_request_returns_a_fail(self):
with app.test_request_context(method='GET', path='/?a=b'):
api = CnnApi(Util.get_halo_context(request))
api.url = api.url + "/lgkmlgkhm??l,mhb&&,g,hj "
timeout = Util.get_timeout(request)
try:
response = api.get(timeout)
assert False
except ApiError as e:
eq_(e.status_code, status.HTTP_404_NOT_FOUND)
#eq_(e.__class__.__name__,"CircuitBreakerError")
def test_9_send_event(self):
with app.test_request_context(method='GET', path='/?a=b'):
from halo_flask.events import AbsBaseEvent
class Event1Event(AbsBaseEvent):
target_service = 'func1'
key_name = 'def'
key_val = '456'
event = Event1Event()
dict = {"name": "david"}
response = event.send_event(dict)
print("event response " + str(response))
eq_(response, 'sent event')
def test_901_event_filter(self):
app.config['REQUEST_FILTER_CLASS'] = 'tests_flask.TestFilter'
with app.test_request_context(method='GET', path='/?a=b',headers= {HaloContext.items.get(HaloContext.CORRELATION):"123"}):
response = self.a2.process_get(request,{})
def test_902_event_filter(self):
app.config['REQUEST_FILTER_CLASS'] = 'tests_flask.TestFilter'
app.config['REQUEST_FILTER_CLEAR_CLASS'] = 'tests_flask.TestRequestFilterClear'
with app.test_request_context(method='GET', path='/?a=b',headers= {HaloContext.items.get(HaloContext.CORRELATION):"123"}):
response = self.a2.process_get(request,{})
def test_903_event_filter(self):
app.config['REQUEST_FILTER_CLASS'] = 'tests_flask.TestFilter'
app.config['REQUEST_FILTER_CLEAR_CLASS'] = 'tests_flask.TestRequestFilterClear'
with app.test_request_context(method='GET', path='/?a=b',headers= {HaloContext.items.get(HaloContext.CORRELATION):"123"}):
response = self.a2.do_process(HTTPChoice.get,request.args)
def test_91_system_debug_enabled(self):
with app.test_request_context(method='GET', path='/?a=b'):
os.environ['DEBUG_LOG'] = 'true'
flag = 'false'
for i in range(0, 180):
ret = Util.get_system_debug_enabled()
print(ret)
if ret == 'true':
flag = ret
eq_(flag, 'true')
def test_92_debug_enabled(self):
header = {'X_HALO_DEBUG_LOG_ENABLED': 'true'}
with app.test_request_context(method='GET', path='/?a=b', headers=header):
ret = Util.get_halo_context(request)
eq_(ret.dict[HaloContext.items[HaloContext.DEBUG_LOG]], 'true')
def test_93_json_log(self):
import traceback
header = {'X_HALO_DEBUG_LOG_ENABLED': 'true'}
with app.test_request_context(method='GET', path='/?a=b', headers=header):
halo_context = Util.get_halo_context(request)
try:
raise Exception("test it")
except Exception as e:
e.stack = traceback.format_exc()
ret = log_json(halo_context, {"abc": "def"}, err=e)
print(str(ret))
eq_(ret[HaloContext.items[HaloContext.DEBUG_LOG]], 'true')
def test_94_get_request_with_debug(self):
header = {'X_HALO_DEBUG_LOG_ENABLED': 'true'}
with app.test_request_context(method='GET', path='/?a=b', headers=header):
ret = Util.get_debug_enabled(request)
eq_(ret, 'true')
def test_95_debug_event(self):
event = {HaloContext.items[HaloContext.DEBUG_LOG]: 'true'}
ret = BaseUtil.get_correlation_from_event(event)
eq_(BaseUtil.event_req_context[HaloContext.items[HaloContext.DEBUG_LOG]], 'true')
ret = BaseUtil.get_correlation_from_event(event)
eq_(ret[HaloContext.items[HaloContext.DEBUG_LOG]], 'true')
def test_96_pref_mixin(self):
with app.test_request_context(method='GET', path='/perf'):
response = self.p1.process_get(request, {})
eq_(response.code, status.HTTP_200_OK)
def test_97_pref_mixin1(self):
with app.test_request_context(method='GET', path='/perf/tst'):
response = self.p2.get()
eq_(response.status_code, status.HTTP_200_OK)
def test_98_run_simple_delete(self):
with app.test_request_context(method='DELETE', path="/start"):
response = self.a2.delete()
eq_(response.status_code, status.HTTP_200_OK)
def test_990_run_seq_get(self):
with app.test_request_context(method='GET', path="/"):
response = self.a2.get()
eq_(response.status_code, status.HTTP_200_OK)
def test_991_load_saga(self):
with open("../env/config/saga.json") as f:
jsonx = json.load(f)
sagax = saga.load_saga("test", jsonx, app.config['SAGA_SCHEMA'])
eq_(len(sagax.actions), 6)
def test_9920_run_saga(self):
with app.test_request_context(method='POST', path="/"):
response = self.a2.post()
eq_(response.status_code, status.HTTP_201_CREATED)
def test_9921_run_saga_bq(self):
with app.test_request_context(method='POST', path="/tst?sub_func=deposit"):
response = self.a2.post()
eq_(response.status_code, status.HTTP_201_CREATED)
def test_9922_run_saga_bq_error(self):
with app.test_request_context(method='POST', path="/tst?sub_func=tst"):
try:
response = self.a2.post()
assert False
except Exception as e:
eq_(e.__class__.__name__, "InternalServerError")
def test_9923_trans_json(self):
with app.test_request_context(method='GET', path="/tst"):
try:
response = self.a2.get()
eq_(response.data, b'{"employees": [{"id": 1, "name": "Pankaj", "salary": "10000"}, {"name": "David", "salary": "5000", "id": 2}]}')
except Exception as e:
eq_(e.__class__.__name__, "InternalServerError")
def test_9930_rollback_saga(self):
with app.test_request_context(method='PUT', path="/"):
try:
response = self.a2.process_put(request, {})
assert False
except Exception as e:
eq_(e.__class__.__name__, "ApiError")
def test_9931_rollback_saga_error(self):
with app.test_request_context(method='PATCH', path="/"):
try:
response = self.a2.process_patch(request, {})
assert False
except Exception as e:
eq_(e.__class__.__name__, "SagaError")
def test_9932_all_rollback_saga(self):
with app.test_request_context(method='PUT', path="/"):
try:
response = self.a2.put()
assert False
except Exception as e:
eq_(e.__class__.__name__, "InternalServerError")
def test_9933_all_rollback_saga_bq(self):
with app.test_request_context(method='PUT', path="/test?sub_func=deposit"):
try:
response = self.a2.put()
assert False
except Exception as e:
eq_(e.__class__.__name__, "InternalServerError")
def test_9940_ssm_aws(self): # @TODO test without HALO_AWS
header = {'HTTP_HOST': '127.0.0.2'}
app.config['HALO_HOST'] = 'halo_flask'
app.config['SSM_TYPE'] = "AWS"
#app.config['PROVIDER'] = "AWS"
app.config['AWS_REGION'] = 'us-east-1'
with app.test_request_context(method='GET', path='/?a=b', headers=header):
try:
from halo_flask.ssm import set_app_param_config
params = {}
params["id"] = "124"
set_app_param_config(app.config['SSM_TYPE'],params )
import time
print("sleep.")
time.sleep(5.4)
from halo_flask.ssm import get_app_config
config = get_app_config(app.config['SSM_TYPE'])
eq_(config.get_param("halo_flask")["id"], '124')
except Exception as e:
eq_(e.__class__.__name__, "ProviderError")
def test_9941_ssm_aws(self): # @TODO test with HALO_AWS
header = {'HTTP_HOST': | |
<filename>taxontabletools/site_occupancy.py
def site_occupancy_barchart(TaXon_table_xlsx, meta_data_to_test, taxonomic_level, path_to_outdirs, x_site_occ, y_site_occ, template, theme, font_size):
import os, webbrowser
import pandas as pd
from pandas import DataFrame
from pathlib import Path
import plotly.graph_objects as go
import PySimpleGUI as sg
color1 = theme[0]
color2 = theme[1]
opacity_value = theme[2]
## adjust taxonomic level if necessary
if taxonomic_level in ["ASVs", "ESVs", "OTUs", "zOTUs"]:
taxon_title = taxonomic_level
taxonomic_level = "ID"
TaXon_table_xlsx = Path(TaXon_table_xlsx)
TaXon_table_df = pd.read_excel(TaXon_table_xlsx, header = 0)
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
Meta_data_table_df = pd.read_excel(Meta_data_table_xlsx, header = 0).fillna("nan")
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
metadata_list = Meta_data_table_df[meta_data_to_test].values.tolist()
metadata_loc = Meta_data_table_df.columns.tolist().index(meta_data_to_test)
## drop samples with metadata called nan (= empty)
drop_samples = [i[0] for i in Meta_data_table_df.values.tolist() if i[metadata_loc] == "nan"]
if drop_samples != []:
## filter the TaXon table
TaXon_table_df = TaXon_table_df.drop(drop_samples, axis=1)
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
## also remove empty OTUs
row_filter_list = []
for row in TaXon_table_df.values.tolist():
reads = set(row[10:])
if reads != {0}:
row_filter_list.append(row)
columns = TaXon_table_df.columns.tolist()
TaXon_table_df = pd.DataFrame(row_filter_list, columns=columns)
Meta_data_table_df = pd.DataFrame([i for i in Meta_data_table_df.values.tolist() if i[0] not in drop_samples], columns=Meta_data_table_df.columns.tolist())
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
TaXon_table_n_samples = len(TaXon_table_samples)
n_sites = len(set(Meta_data_table_df[meta_data_to_test].tolist()))
answer = "Ask"
output_message = "No"
if (sorted(TaXon_table_samples) == sorted(Meta_data_table_samples) and TaXon_table_n_samples != n_sites):
site_occupancy_dict = {}
sites = set(Meta_data_table_df[meta_data_to_test].tolist())
for site in sites:
# this can either be a species name or the above specified taxonomic level
present_OTU_list = []
# extract samples that belong to the site from the metadata file
included_samples_list = Meta_data_table_df[Meta_data_table_df.values == site]['Samples'].values.tolist()
# count the number of samples per site to calculate the site occupancy
n_samples = len(included_samples_list)
# create a list of all species (or the specified taxonomic level)
if taxonomic_level == "OTUs":
taxonomic_level = "ID"
overall_included_species_list = TaXon_table_df[taxonomic_level].values.tolist()
# make the list unique
overall_included_species_set = set(overall_included_species_list)
# remove potential 'nan's from the list
overall_included_species_set = [x for x in overall_included_species_set if str(x) != 'nan']
# create a set of species that is present at the sites
for sample in included_samples_list:
OTUs_per_species_list = []
# check the read abundances for each sample
read_abundance_list = TaXon_table_df[sample].values.tolist()
# enumerate the read abundances for each sample and collect all lines that have at least one read
for i, read_abundance in enumerate(read_abundance_list):
species = TaXon_table_df[taxonomic_level][i]
# if reads are present, collect the species name (or the specified taxonomic level) from the TaXon table
if read_abundance != 0:
OTUs_per_species_list.append(species)
# remove all nans
OTUs_per_species_list = [x for x in OTUs_per_species_list if str(x) != 'nan']
# make list unique
OTUs_per_species_list = list(set(OTUs_per_species_list))
# append to list of species for the current site
present_OTU_list.append(OTUs_per_species_list)
# flatten the list of present species per site
present_OTU_list_flattened = [val for sublist in present_OTU_list for val in sublist]
# store occupancy of each species in a dict, will be accessed by position in list
occupancy_dict = {}
# count the number of occurrences of each species and calculate the occupancy based on the number of samples
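# e.g. a taxon detected in 3 of 4 samples at a site gets an occupancy of 3 / 4 * 100 = 75 %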
for species in overall_included_species_set:
count = present_OTU_list_flattened.count(species)
occupancy = count / n_samples * 100
occupancy_dict[species] = occupancy
occupancy_dict = {k: v for k, v in sorted(occupancy_dict.items(), key=lambda item: item[1])}
occupancy_list = list(occupancy_dict.values())
species_list = list(occupancy_dict.keys())
if (taxonomic_level == "Species" or taxonomic_level == "Genus"):
x_values = ["<i>" + taxon + "</i>" for taxon in species_list]
else:
x_values = species_list
occupancy_plot_directory = Path(str(path_to_outdirs) + "/" + "Site_occupancy_plots" + "/" + TaXon_table_xlsx.stem)
if not os.path.exists(occupancy_plot_directory):
os.mkdir(occupancy_plot_directory)
fig = go.Figure(data=[go.Bar(x=x_values, y=occupancy_list)])
fig.update_traces(marker_color=color1, marker_line_color=color2,marker_line_width=0.6, opacity=opacity_value)
fig.update_layout(title_text=site + " (" + taxonomic_level + ")", yaxis_title="occupancy (%)")
fig.update_layout(height=int(y_site_occ), width=int(x_site_occ), template=template, font_size=font_size, title_font_size=font_size)
fig.update_yaxes(range=[0,100])
fig.update_xaxes(tickmode='linear')
fig.update_xaxes(tickangle=-90)
output_pdf = Path(str(occupancy_plot_directory) + "/" + site + "_" + taxonomic_level + ".pdf")
output_html = Path(str(occupancy_plot_directory) + "/" + site + "_" + taxonomic_level + ".html")
occupancy_table = Path(str(occupancy_plot_directory) + "/" + site + "_" + taxonomic_level + ".xlsx")
fig.write_image(str(output_pdf))
fig.write_html(str(output_html))
occupancy_df = pd.DataFrame(occupancy_list, species_list)
occupancy_df.columns = ["Occupancy"]
occupancy_df.index.name = "Taxon"
occupancy_df = occupancy_df.sort_values("Occupancy")
# sort the table numerically if OTUs were chosen
if taxonomic_level == "ID":
sort_list = []
for OTU in occupancy_df.index.tolist():
sort_list.append(int(OTU.split("_")[1]))
occupancy_df["sort"] = sort_list
occupancy_df = occupancy_df.sort_values("sort")
occupancy_df = occupancy_df.drop("sort", axis=1)
occupancy_df.to_excel(occupancy_table)
## ask to show file
answer = sg.PopupYesNo('Show plot?', keep_on_top=True)
if answer == "Yes":
webbrowser.open('file://' + str(output_html))
## print closing text
closing_text = "Site occupancy plots are found under:\n" + '/'.join(str(output_pdf).split("/")[-4:])
sg.Popup(closing_text, title="Finished", keep_on_top=True)
## write to log
from taxontabletools.create_log import ttt_log
placeholder = TaXon_table_xlsx.name + " (multiple site occupancy plots)"
ttt_log("site occupancy", "analysis", TaXon_table_xlsx.name, placeholder, meta_data_to_test, path_to_outdirs)
else:
sg.PopupError("Please check your Metadata file and Taxon table file: The samples do not match or the metadata is unique for all samples!", keep_on_top=True)
def site_occupancy_heatmap(TaXon_table_xlsx, path_to_outdirs, template, height, width, meta_data_to_test, taxonomic_level, font_size, color_discrete_sequence, add_categories_sum):
import PySimpleGUI as sg
import pandas as pd
import numpy as np
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from pathlib import Path
import webbrowser, os
TaXon_table_xlsx = Path(TaXon_table_xlsx)
Meta_data_table_xlsx = Path(str(path_to_outdirs) + "/" + "Meta_data_table" + "/" + TaXon_table_xlsx.stem + "_metadata.xlsx")
TaXon_table_df = pd.read_excel(TaXon_table_xlsx, header=0).fillna("unidentified")
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
Meta_data_table_df = pd.read_excel(Meta_data_table_xlsx, header=0).fillna("nan")
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
## drop samples with metadata called nan (= empty)
drop_samples = [i[0] for i in Meta_data_table_df.values.tolist() if i[1] == "nan"]
if drop_samples != []:
## filter the TaXon table
TaXon_table_df = TaXon_table_df.drop(drop_samples, axis=1)
TaXon_table_samples = TaXon_table_df.columns.tolist()[10:]
## also remove empty OTUs
row_filter_list = []
for row in TaXon_table_df.values.tolist():
reads = set(row[10:])
if reads != {0}:
row_filter_list.append(row)
columns = TaXon_table_df.columns.tolist()
TaXon_table_df = pd.DataFrame(row_filter_list, columns=columns)
Meta_data_table_df = pd.DataFrame([i for i in Meta_data_table_df.values.tolist() if i[0] not in drop_samples], columns=Meta_data_table_df.columns.tolist())
Meta_data_table_samples = Meta_data_table_df['Samples'].tolist()
metadata_list = Meta_data_table_df[meta_data_to_test].values.tolist()
## create a y axis title text
taxon_title = taxonomic_level
## adjust taxonomic level if necessary
if taxonomic_level in ["ASVs", "ESVs", "OTUs", "zOTUs"]:
taxon_title = taxonomic_level
taxonomic_level = "ID"
if len(set(metadata_list)) == 1:
sg.PopupError("Please choose more than one meta data category.")
else:
if sorted(TaXon_table_samples) == sorted(Meta_data_table_samples):
## define variables
samples = TaXon_table_samples
OTU_abundances_dict = {}
samples_metadata_list = []
## extract the relevant data
TaXon_table_df = TaXon_table_df[[taxonomic_level] + samples]
## define an aggregation function to combine multiple hits of one taxonomic level
aggregation_functions = {}
## define samples functions
for sample in samples:
## 'sum' will calculate the sum of p/a data
aggregation_functions[sample] = 'sum'
## define taxon level function
aggregation_functions[taxonomic_level] = 'first'
## create condensed dataframe
TaXon_table_df = TaXon_table_df.groupby(TaXon_table_df[taxonomic_level]).aggregate(aggregation_functions)
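## e.g. two OTU rows that share the same entry at the chosen taxonomic level collapse into one row
## whose per-sample values are the summed reads of the original rows (illustration of the 'sum'/'first' rules above)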
if 'unidentified' in TaXon_table_df.index:
TaXon_table_df = TaXon_table_df.drop('unidentified')
## create a list of samples for each category
category_dict = {}
for sample, category in zip(Meta_data_table_samples, metadata_list):
if category not in category_dict.keys():
category_dict[category] = [sample]
else:
category_dict[category] = category_dict[category] + [sample]
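## e.g. (hypothetical sample/category names) this yields something like {"river": ["S1", "S2"], "lake": ["S3"]}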
## collect all available taxa
taxa = TaXon_table_df[taxonomic_level].values.tolist()
## check if the respective species are present in the collections
taxon_presence_dict = {}
n_rows, row_heights = [], []
color_discrete_sequence = color_discrete_sequence * len(category_dict.keys())
if (taxonomic_level == "Species" or taxonomic_level == "Genus"):
x_values = ["<i>" + taxon + "</i>" for taxon in taxa]
else:
x_values = taxa
if add_categories_sum == True:
for samples in category_dict.values():
row_heights.append(len(samples))
row_heights.append(len(set(metadata_list)))
fig = make_subplots(rows=len(set(metadata_list)) + 1, cols=1, shared_xaxes=True, vertical_spacing=0.05, row_heights=row_heights)
else:
for samples in category_dict.values():
row_heights.append(len(samples))
fig = make_subplots(rows=len(set(metadata_list)), cols=1, shared_xaxes=True, vertical_spacing=0.05, row_heights=row_heights)
row = 1
for metadata, samples in category_dict.items():
if type(samples) == "str":
samples = [samples]
z_values = []
for sample in samples:
reads = TaXon_table_df[sample].values.tolist()
z_values = z_values + [[1 if x > 0 else 0 for x in reads]]
y_values = samples
fig.add_trace(go.Heatmap(z=z_values, x=x_values, y=y_values, showscale=False, xgap=1, ygap=1, hoverongaps = False, colorscale=[[0, "White"], [1, color_discrete_sequence[row-1]]]), row=row, col=1)
row += 1
if add_categories_sum == True:
z_values, y_values = [], []
for metadata, samples in category_dict.items():
reads = [sum(reads) for reads in TaXon_table_df[samples].values.tolist()]
z_values = z_values + [[1 if x > 0 else 0 for x in reads]]
y_values.append(metadata)
fig.add_trace(go.Heatmap(z=z_values[::-1], x=x_values, y=y_values[::-1], showscale=False, xgap=1, ygap=1, hoverongaps = False, colorscale=[[0, "White"], [1, "Grey"]]), row=row, col=1)
row += 1
fig.update_layout(width=int(width), height=int(height), template="seaborn", font_size=font_size, yaxis_nticks=5, title_font_size=font_size)
fig.update_xaxes(tickmode='linear')
fig.update_yaxes(tickmode='linear')
fig.update_xaxes(tickangle=-90)
occupancy_plot_directory = Path(str(path_to_outdirs) + "/" + "Site_occupancy_plots" + "/" + TaXon_table_xlsx.stem)
if not os.path.exists(occupancy_plot_directory):
os.mkdir(occupancy_plot_directory)
## define | |
#!/usr/bin/env python
import numpy as np
from tqdm import tqdm
from astropy.constants import G as Ggrav
from .low_level_utils import fast_dist
G = Ggrav.to('kpc Msun**-1 km**2 s**-2').value
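# numerically this works out to roughly 4.30e-6 kpc Msun**-1 km**2 s**-2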
def all_profiles(bins, positions, velocities, masses, two_dimensional=False, zcut=None,
ages=None, pbar_msg='Making profiles', careful=False, nexpr=False):
"""
assumes all positions and velocities are rotated in the same way, such
that the angular momentum axis aligns with the z axis
if two_dimensional == False, then compute:
M(<r), M(r), rho = M(r)/dV, Vcirc = sqrt(GM(<r)/r), mag J(r), mag J(<r), J_z(r), J_z(<r)
if two_dimensional == True, then compute:
M(<R), M(R), rho = M(R)/dA, Vcirc = mean(vx**2 + vy**2), mag J(R), mag J(<R), J_z(R), J_z(<R)
:bins : array-like : sorted (from small to large) bin edges to use
:positions : array-like : particle positions, rotated such that z aligns with angular momentum axis
:velocities : array-like : particle velocities, rotated in the same way as the positions
:masses : array-like : particle masses, in the same order as positions and velocities
:two_dimensional : bool : whether or not to do 2D profiles
:zcut : float or None : vertical cut (in kpc) applied to |z| when two_dimensional is True
:ages : array-like or None : optional per-particle ages used to build a mass-weighted age profile
:pbar_msg : str : what to print for the pbar (total mass and number of particles is appended)
:careful : bool : whether to run an extra sanity check that the in-plane radii match the precomputed distances
:nexpr : bool : whether or not to try to use numexpr to speed up the masking and summing
"""
if nexpr:
from numexpr import evaluate
print("Using numexpr for the masking and summing masses")
# work from outside in, throwing away particles as I no longer need them
assert positions.shape[0] == velocities.shape[0] == masses.shape[0]
m_of_r = np.empty(bins.size)
J_of_r = np.empty(bins.size)
Jz_of_r = np.empty(bins.size)
Jz_inside_r = np.empty(bins.size)
JinsideR = np.empty(bins.size)
specJinsideR = np.zeros(bins.size)
specJ_of_r = np.zeros(bins.size)
specJz_of_r = np.zeros(bins.size)
specJz_insideR = np.zeros(bins.size)
if ages is not None:
age_of_r = np.zeros(bins.size)
density = np.empty_like(m_of_r)
if two_dimensional:
vcirc = np.zeros(bins.size)
if two_dimensional:
x, y, z = positions.T
# distances are in the plane of the galaxy
distances = np.sqrt(x**2 + y**2)
else:
distances = fast_dist(positions) # center assumed to be at (0,0,0)
# throw away any particles beyond my last bin edge
msk = distances <= bins.max()
if two_dimensional:
msk = msk & (np.abs(z) <= zcut)
positions = positions[msk]
velocities = velocities[msk]
masses = masses[msk]
distances = distances[msk]
if ages is not None:
ages = ages[msk]
if two_dimensional:
x = x[msk]
y = y[msk]
# compute (angular) momenta for the particles:
# velocities should already have the halo at
pvec = (velocities.T*masses).T
# J = r cross p, and pos is assumed to have the halo at 0,0,0
Jvec = np.cross(positions, pvec)
del pvec
Jz = Jvec[:, 2]
if two_dimensional:
# calculate circular velocities:
# velocities in the plane of the disk
vx, vy = velocities[:, 0], velocities[:, 1]
V = np.vstack((vx, vy)).T # velocity vector in the plane of the disk
R = np.vstack((x, y)).T # distance vector in the plane of the disk
# use the definition of the dot product to find the angle between R and V, theta
# a dot b == mag(a) * mag(b) * cos(theta)
# => cos(theta) == a dot b / (mag(a) * mag(b))
# checked by hand -- does the dot product of R[ii] w/ V[ii]
R_dot_V = np.sum(R*V, axis=1)
mag_V = np.linalg.norm(V, axis=1)
# checked by hand -- gives the magnitude of R[ii]
mag_R = np.linalg.norm(R, axis=1)
if careful:
assert (mag_R == distances).all() # should be identically true
theta = np.arccos(R_dot_V / (mag_R * mag_V))
# now that I know the angle, the circular velocity of each particle is going to be
# the magnitude of each velocity in the plane of the disk times the sin of angle between R and V
# -- if the angle is 0, then all the velocity is radial; if it's pi/2, then all the velocity is tangential (circular)
circular_velocities = mag_V*np.sin(theta)
# handle any nan (i.e. either R or V == 0) by replacing with a 0
print("Replacing {} NaNs with 0".format(
np.count_nonzero(np.isnan(circular_velocities))))
circular_velocities[np.isnan(circular_velocities)] = 0
# clean up to save memory
del R, V, theta
# make sure this is true because otherwise return will be nonsense since I use cumsum at the end
assert (np.sort(bins) == bins).all()
rev_bins = bins[::-1]
if two_dimensional:
pbar_msg += '; Mtot(R < {:.0f} kpc, Z < {:.1f} kpc)'.format(bins.max(), zcut)
else:
pbar_msg += '; Mtot(r < {:.0f} kpc)'.format(bins.max())
pbar_msg += ' = {:.2g} Msun, {:,} particles'.format(
np.sum(masses), masses.size)
for ii in tqdm(range(len(rev_bins)), pbar_msg):
rhigh = rev_bins[ii]
if ii == len(rev_bins)-1:
rlow = 0
else:
rlow = rev_bins[ii+1]
assert rlow < rhigh
if two_dimensional:
shell_vol = 4.*np.pi*(rhigh**2 - rlow**2)
else:
shell_vol = 4./3.*np.pi*(rhigh**3 - rlow**3)
if nexpr:
# within_rhigh = evaluate("(distances <= rhigh)") #No need to do this -- I trim the particles before the loop and within the loop, so everything is within rhigh trivially
minsider = evaluate("sum(masses)")
inbin = evaluate("(distances > rlow)")
# sum up the masses where inbin, 0 otherwise
thism = evaluate("sum(where(inbin,masses,0))")
Jz_of_r[ii] = evaluate("sum(where(inbin,Jz,0))")
Jz_inside_r[ii] = evaluate("sum(Jz)")
# particles that are within rhigh but not in the bin. equivalent to (within_rhigh) & (logical_not( (distances>rlow) & (within_rhigh) )
# equivalent to False if not within_rhigh, so throws away outer particles
# equivalent to True & logical_not(True & True) = True & not(True) = True & False = False if distances > rlow and distances < rhigh
# equivalent to True & not(False & True) = True & not(False) = True if distances <= rlow
# keep = evaluate("~inbin") #but since I trim the particles so within_rhigh is trivially true (see above), this just reduces to not inbin, so no reason to calculate/store that
else:
# within_rhigh = distances <= rhigh
# &(within_rhigh) #works for both 2D and 3D
inbin = (distances > rlow)
minsider = np.sum(masses)
thism = np.sum(masses[inbin])
# keep = within_rhigh & (~inbin) #save logic as above
# just the z angular momentum for the particles in the bin, allowed to cancel
Jz_of_r[ii] = np.sum(Jz[inbin])
# Jz of all the particles inside R. should be smoother.
Jz_inside_r[ii] = np.sum(Jz)
m_of_r[ii] = thism
density[ii] = thism/shell_vol
# norm of the vector sum (sum(Jx), sum(Jy), sum(Jz)) of the angular momentum in the bin -- no need to mass weight because J is mass weighted
J_of_r[ii] = np.linalg.norm(np.sum(Jvec[inbin], axis=0))
# Do the same for all the particles inside the max of this bin; different because these can cancel differently
# remember that everything is within the max of this bin
JinsideR[ii] = np.linalg.norm(np.sum(Jvec, axis=0))
# normalize all those to the appropriate specific value if m > 0.
if thism > 0:
specJ_of_r[ii] = J_of_r[ii]/thism
specJz_of_r[ii] = Jz_of_r[ii]/thism
if two_dimensional:
vcirc[ii] = np.average(
circular_velocities[inbin], weights=masses[inbin])
if ages is not None:
age_of_r[ii] = np.average(ages[inbin], weights=masses[inbin])
if minsider > 0:
specJinsideR[ii] = JinsideR[ii]/minsider
specJz_insideR[ii] = Jz_inside_r[ii]/minsider
distances = distances[~inbin]
masses = masses[~inbin]
positions = positions[~inbin]
velocities = velocities[~inbin]
Jvec = Jvec[~inbin]
Jz = Jz[~inbin]
if two_dimensional:
circular_velocities = circular_velocities[~inbin]
if ages is not None:
ages = ages[~inbin]
# swap everything back around so that I go from the inside out so that I can cumsum. remember bins is already sorted because I didn't swap it; I created rev_bins.
density = density[::-1]
m_of_r = m_of_r[::-1]
J_of_r = J_of_r[::-1]
Jz_of_r = Jz_of_r[::-1]
JinsideR = JinsideR[::-1]
Jz_inside_r = Jz_inside_r[::-1]
specJ_of_r = specJ_of_r[::-1]
specJz_of_r = specJz_of_r[::-1]
specJinsideR = specJinsideR[::-1]
specJz_insideR = specJz_insideR[::-1]
if ages is not None:
age_of_r = age_of_r[::-1]
mltr = np.cumsum(m_of_r)
Jltr = np.cumsum(J_of_r)
Jzltr = np.cumsum(Jz_of_r)
specJltr = np.cumsum(specJ_of_r)
specJzltr = np.cumsum(specJz_of_r)
# don't cumsum the "inside R" lines -- doesn't make much sense
if two_dimensional == False:
# calculate keplerian circular velocity
vcirc = np.sqrt(G*mltr/bins) # remember that bins didn't get reversed
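# with G in kpc Msun**-1 km**2 s**-2 (see module top), mltr in Msun and bins in kpc, vcirc comes out in km/s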
else:
vcirc = vcirc[::-1]
# remember this gets saved directly, so be good about naming!
end = 'R' if two_dimensional else 'r'
toreturn = {
'density': density,
'M.of.'+end: m_of_r,
'J.of.'+end: J_of_r,
'Jz.of.'+end: Jz_of_r,
'J.inside'+end: JinsideR,
'Jz.inside'+end: Jz_inside_r,
'spec.J.of.'+end: specJ_of_r,
'spec.Jz.of.'+end: specJz_of_r,
'spec.Jinside'+end: specJinsideR,
'spec.Jz.insideR'+end: | |
advertise IPv6 graceful restart capability to this neighbor.
type: str
choices:
- enable
- disable
capability_orf:
description:
- Accept/Send IPv4 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_orf6:
description:
- Accept/Send IPv6 ORF lists to/from this neighbor.
type: str
choices:
- none
- receive
- send
- both
capability_route_refresh:
description:
- Enable/disable advertise route refresh capability to this neighbor.
type: str
choices:
- enable
- disable
connect_timer:
description:
- Interval (sec) for connect timer.
type: int
default_originate_routemap:
description:
- Route map to specify criteria to originate IPv4 default. Source router.route-map.name.
type: str
default_originate_routemap6:
description:
- Route map to specify criteria to originate IPv6 default. Source router.route-map.name.
type: str
description:
description:
- Description.
type: str
distribute_list_in:
description:
- Filter for IPv4 updates from this neighbor. Source router.access-list.name.
type: str
distribute_list_in6:
description:
- Filter for IPv6 updates from this neighbor. Source router.access-list6.name.
type: str
distribute_list_out:
description:
- Filter for IPv4 updates to this neighbor. Source router.access-list.name.
type: str
distribute_list_out6:
description:
- Filter for IPv6 updates to this neighbor. Source router.access-list6.name.
type: str
dont_capability_negotiate:
description:
- Don't negotiate capabilities with this neighbor.
type: str
choices:
- enable
- disable
ebgp_enforce_multihop:
description:
- Enable/disable allow multi-hop EBGP neighbors.
type: str
choices:
- enable
- disable
ebgp_multihop_ttl:
description:
- EBGP multihop TTL for this peer.
type: int
filter_list_in:
description:
- BGP filter for IPv4 inbound routes. Source router.aspath-list.name.
type: str
filter_list_in6:
description:
- BGP filter for IPv6 inbound routes. Source router.aspath-list.name.
type: str
filter_list_out:
description:
- BGP filter for IPv4 outbound routes. Source router.aspath-list.name.
type: str
filter_list_out6:
description:
- BGP filter for IPv6 outbound routes. Source router.aspath-list.name.
type: str
holdtime_timer:
description:
- Interval (sec) before peer considered dead.
type: int
interface:
description:
- Interface. Source system.interface.name.
type: str
keep_alive_timer:
description:
- Keep alive timer interval (sec).
type: int
link_down_failover:
description:
- Enable/disable failover upon link down.
type: str
choices:
- enable
- disable
local_as:
description:
- Local AS number of neighbor.
type: int
local_as_no_prepend:
description:
- Do not prepend local-as to incoming updates.
type: str
choices:
- enable
- disable
local_as_replace_as:
description:
- Replace real AS with local-as in outgoing updates.
type: str
choices:
- enable
- disable
maximum_prefix:
description:
- Maximum number of IPv4 prefixes to accept from this peer.
type: int
maximum_prefix_threshold:
description:
- Maximum IPv4 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_threshold6:
description:
- Maximum IPv6 prefix threshold value (1 - 100 percent).
type: int
maximum_prefix_warning_only:
description:
- Enable/disable IPv4 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix_warning_only6:
description:
- Enable/disable IPv6 Only give warning message when limit is exceeded.
type: str
choices:
- enable
- disable
maximum_prefix6:
description:
- Maximum number of IPv6 prefixes to accept from this peer.
type: int
name:
description:
- Neighbor group name.
required: true
type: str
next_hop_self:
description:
- Enable/disable IPv4 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
next_hop_self6:
description:
- Enable/disable IPv6 next-hop calculation for this neighbor.
type: str
choices:
- enable
- disable
override_capability:
description:
- Enable/disable override result of capability negotiation.
type: str
choices:
- enable
- disable
passive:
description:
- Enable/disable sending of open messages to this neighbor.
type: str
choices:
- enable
- disable
prefix_list_in:
description:
- IPv4 Inbound filter for updates from this neighbor. Source router.prefix-list.name.
type: str
prefix_list_in6:
description:
- IPv6 Inbound filter for updates from this neighbor. Source router.prefix-list6.name.
type: str
prefix_list_out:
description:
- IPv4 Outbound filter for updates to this neighbor. Source router.prefix-list.name.
type: str
prefix_list_out6:
description:
- IPv6 Outbound filter for updates to this neighbor. Source router.prefix-list6.name.
type: str
remote_as:
description:
- AS number of neighbor.
type: int
remove_private_as:
description:
- Enable/disable remove private AS number from IPv4 outbound updates.
type: str
choices:
- enable
- disable
remove_private_as6:
description:
- Enable/disable remove private AS number from IPv6 outbound updates.
type: str
choices:
- enable
- disable
restart_time:
description:
- Graceful restart delay time (sec, 0 = global default).
type: int
retain_stale_time:
description:
- Time to retain stale routes.
type: int
route_map_in:
description:
- IPv4 Inbound route map filter. Source router.route-map.name.
type: str
route_map_in6:
description:
- IPv6 Inbound route map filter. Source router.route-map.name.
type: str
route_map_out:
description:
- IPv4 Outbound route map filter. Source router.route-map.name.
type: str
route_map_out6:
description:
- IPv6 Outbound route map filter. Source router.route-map.name.
type: str
route_reflector_client:
description:
- Enable/disable IPv4 AS route reflector client.
type: str
choices:
- enable
- disable
route_reflector_client6:
description:
- Enable/disable IPv6 AS route reflector client.
type: str
choices:
- enable
- disable
route_server_client:
description:
- Enable/disable IPv4 AS route server client.
type: str
choices:
- enable
- disable
route_server_client6:
description:
- Enable/disable IPv6 AS route server client.
type: str
choices:
- enable
- disable
send_community:
description:
- IPv4 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
send_community6:
description:
- IPv6 Send community attribute to neighbor.
type: str
choices:
- standard
- extended
- both
- disable
shutdown:
description:
- Enable/disable shutdown this neighbor.
type: str
choices:
- enable
- disable
soft_reconfiguration:
description:
- Enable/disable allow IPv4 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
soft_reconfiguration6:
description:
- Enable/disable allow IPv6 inbound soft reconfiguration.
type: str
choices:
- enable
- disable
stale_route:
description:
- Enable/disable stale route after neighbor down.
type: str
choices:
- enable
- disable
strict_capability_match:
description:
- Enable/disable strict capability matching.
type: str
choices:
- enable
- disable
unsuppress_map:
description:
- IPv4 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
unsuppress_map6:
description:
- IPv6 Route map to selectively unsuppress suppressed routes. Source router.route-map.name.
type: str
update_source:
description:
- Interface to use as source IP/IPv6 address of TCP connections. Source system.interface.name.
type: str
weight:
description:
- Neighbor weight.
type: int
neighbor_range:
description:
- BGP neighbor range table.
type: list
suboptions:
id:
description:
- Neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix:
description:
- Neighbor range prefix.
type: str
neighbor_range6:
description:
- BGP IPv6 neighbor range table.
type: list
suboptions:
id:
description:
- IPv6 neighbor range ID.
required: true
type: int
max_neighbor_num:
description:
- Maximum number of neighbors.
type: int
neighbor_group:
description:
- Neighbor group name. Source router.bgp.neighbor-group.name.
type: str
prefix6:
description:
- IPv6 prefix.
type: str
network:
description:
- BGP network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix:
description:
- Network prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
network_import_check:
description:
- Enable/disable ensure BGP network route exists in IGP.
type: str
choices:
- enable
- disable
network6:
description:
- BGP IPv6 network table.
type: list
suboptions:
backdoor:
description:
- Enable/disable route as backdoor.
type: str
choices:
- enable
- disable
id:
description:
- ID.
required: true
type: int
prefix6:
description:
- Network IPv6 prefix.
type: str
route_map:
description:
- Route map to modify generated route. Source router.route-map.name.
type: str
redistribute:
description:
- BGP IPv4 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status
type: str
choices:
- enable
- disable
redistribute6:
description:
- BGP IPv6 redistribute table.
type: list
suboptions:
name:
description:
- Distribute list entry name.
required: true
type: str
route_map:
description:
- Route map name. Source router.route-map.name.
type: str
status:
description:
- Status
type: str
choices:
- enable
- disable
router_id:
description:
- Router ID.
type: str
scan_time:
description:
- Background scanner interval (sec), 0 to disable it.
type: int
synchronization:
description:
- Enable/disable only advertise routes from | |
# Author: <NAME>
# Contributors: <NAME>
import numpy as np
import scipy
import torch
class Geometry():
"""Helper class to calculate distances, angles, and dihedrals
with a unified, vectorized framework depending on whether pytorch
or numpy is used.
Parameters
----------
method : 'torch' or 'numpy' (default='torch')
Library used for compuations
device : torch.device (default=torch.device('cpu'))
Device upon which geometrical calculations will take place. When
embedded as an attribute for a feature class, the device will inherit
from the feature device attribute
"""
def __init__(self, method='torch', device=torch.device('cpu')):
self.device = device
if method not in ['torch', 'numpy']:
raise RuntimeError("Allowed methods are 'torch' and 'numpy'")
self.method = method
# # # # # # # # # # # # #
# Define any types here #
# # # # # # # # # # # # #
if method == 'torch':
self.bool = torch.bool
self.float32 = torch.float32
elif self.method == 'numpy':
self.bool = np.bool_  # np.bool (the plain-bool alias) was removed in newer NumPy
self.float32 = np.float32
def check_for_nans(self, object, name=None):
"""This method checks an object for the presence of nans and
returns an error if any nans are found.
"""
if name is None:
name = ''
if self.isnan(object).any():
raise ValueError(
"Nan found in {}. Check your coordinates!)".format(
name)
)
def check_array_vs_tensor(self, object, name=None):
"""This method checks whether the object (i.e., numpy array or torch
tensor) is consistent with the method chosen for the Geometry
instance (i.e., 'numpy' or 'torch', respectively).
"""
if name is None:
name = ''
if self.method == 'numpy' and type(object) is not np.ndarray:
raise ValueError(
"Input argument {} must be type np.ndarray for Geometry(method='numpy')".format(
name)
)
if self.method == 'torch' and type(object) is not torch.Tensor:
raise ValueError(
"Input argument {} must be type torch.Tensor for Geometry(method='torch')".format(
name)
)
def get_distance_indices(self, n_beads, backbone_inds=[], backbone_map=None):
"""Determines indices of pairwise distance features.
"""
pair_order = []
adj_backbone_pairs = []
for increment in range(1, n_beads):
for i in range(n_beads - increment):
pair_order.append((i, i+increment))
if len(backbone_inds) > 0:
if (backbone_map[i+increment]
- backbone_map[i] == 1):
adj_backbone_pairs.append((i, i+increment))
return pair_order, adj_backbone_pairs
def get_redundant_distance_mapping(self, pair_order):
"""Reformulates pairwise distances from shape [n_frames, n_dist]
to shape [n_frames, n_beads, n_neighbors]
This is done by finding the index mapping between non-redundant and
redundant representations of the pairwise distances. This mapping can
then be supplied to Schnet-related features, such as a
RadialBasisFunction() layer, which use redundant pairwise distance
representations.
"""
pairwise_dist_inds = [zipped_pair[1] for zipped_pair in sorted(
[z for z in zip(pair_order,
np.arange(len(pair_order)))
])
]
map_matrix = scipy.spatial.distance.squareform(pairwise_dist_inds)
map_matrix = map_matrix[~np.eye(map_matrix.shape[0],
dtype=bool)].reshape(
map_matrix.shape[0], -1)
return map_matrix
def get_vectorize_inputs(self, inds, data):
"""Helper function to obtain indices for vectorized calculations.
"""
if len(np.unique([len(feat) for feat in inds])) > 1:
raise ValueError(
"All features must be the same length."
)
feat_length = len(inds[0])
ind_list = [[feat[i] for feat in inds]
for i in range(feat_length)]
dist_list = [data[:, ind_list[i+1], :]
- data[:, ind_list[i], :]
for i in range(feat_length - 1)]
if len(dist_list) == 1:
dist_list = dist_list[0]
return dist_list
def get_distances(self, distance_inds, data, norm=True):
"""Calculates distances in a vectorized fashion.
"""
self.check_array_vs_tensor(data, 'data')
distances = self.get_vectorize_inputs(distance_inds, data)
if norm:
distances = self.norm(distances, axis=2)
self.check_for_nans(distances, 'distances')
return distances
def get_angles(self, angle_inds, data, clip=True):
"""Calculates angles in a vectorized fashion.
If clip is True (default), then the angle cosines are clipped
to be between -1 and 1 to account for numerical error.
"""
self.check_array_vs_tensor(data, 'data')
base, offset = self.get_vectorize_inputs(angle_inds, data)
# This convention assumes that the middle index of the angle triplet
# is the angle vertex. Scalar multiplication of the first vector
# of the angle triplet by -1 means that the vertex point is
# subtracted from the non-vertex point for the first vector.
# This ensures that the arccos operation returns the acute angle
# at the vertex. See test_geometry_features for a non-parallel
# formulation.
base *= -1
angles = self.sum(base * offset, axis=2) / self.norm(base,
axis=2) / self.norm(
offset, axis=2)
if clip:
# Clipping to prevent the arccos to be NaN
angles = self.arccos(self.clip(angles,
lower_bound=-1.,
upper_bound=1.))
self.check_for_nans(angles, 'angles')
return angles
def get_dihedrals(self, dihed_inds, data):
"""Calculates dihedrals in a vectorized fashion.
Note
----
This is implemented in a hacky/bad way. It calculates twice as many
dihedrals as needed and removes every other one. There is a better
way to do this, I think using two lists of angles, but for now
this has the correct functionality.
"""
self.check_array_vs_tensor(data, 'data')
angle_inds = np.concatenate([[(f[i], f[i+1], f[i+2])
for i in range(2)] for f in dihed_inds])
base, offset = self.get_vectorize_inputs(angle_inds, data)
offset_2 = base[:, 1:]
cross_product_adj = self.cross(base, offset, axis=2)
cp_base = cross_product_adj[:, :-1, :]
cp_offset = cross_product_adj[:, 1:, :]
plane_vector = self.cross(cp_offset, offset_2, axis=2)
dihedral_cosines = self.sum(cp_base[:, ::2]*cp_offset[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(cp_offset[:, ::2], axis=2)
dihedral_sines = self.sum(cp_base[:, ::2]*plane_vector[:, ::2],
axis=2)/self.norm(
cp_base[:, ::2], axis=2)/self.norm(plane_vector[:, ::2], axis=2)
dihedral_rad = self.arctan(dihedral_sines / dihedral_cosines)
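# note: arctan(sin/cos) restricts the returned dihedrals to (-pi/2, pi/2);
# an arctan2 of the sines and cosines would recover the full (-pi, pi] range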
#dihedral_rad = self.arccos(dihedral_cosines)
#dihedral_rad = self.arccos(self.clip(dihedral_cosines,
# lower_bound=-1.,
# upper_bound=1.))
self.check_for_nans(dihedral_rad, 'dihedral')
return dihedral_rad
def get_neighbors(self, distances, cutoff=None):
"""Calculates a simple neighbor list in which every bead sees
each other. If a cutoff is specified, only beads inside that distance
cutoff are considered as neighbors.
Parameters
----------
distances: torch.Tensor or np.array
Redundant distance matrix of shape (n_frames, n_beads, n_neighbors).
cutoff: float (default=None)
Distance cutoff in Angstrom in which beads are considered neighbors.
Returns
-------
neighbors: torch.Tensor or np.array
Indices of all neighbors of each bead. This is not affected by the
mask.
Shape [n_frames, n_beads, n_neighbors]
neighbor_mask: torch.Tensor or np.array
Index mask to filter out non-existing neighbors that were
introduced to due distance cutoffs.
Shape [n_frames, n_beads, n_neighbors]
"""
self.check_array_vs_tensor(distances, 'distances')
n_frames, n_beads, n_neighbors = distances.shape
# Create a simple neighbor list of shape [n_frames, n_beads, n_neighbors]
# in which every bead sees each other but themselves.
# First, create a matrix that contains all indices.
neighbors = self.tile(self.arange(n_beads), (n_frames, n_beads, 1))
# To remove the self interaction of beads, an inverted identity matrix
# is used to exclude the respective indices in the neighbor list.
neighbors = neighbors[:, ~self.eye(n_beads, dtype=self.bool)].reshape(
n_frames,
n_beads,
n_neighbors)
if cutoff is not None:
# Create an index mask for neighbors that are inside the cutoff
neighbor_mask = distances < cutoff
neighbor_mask = self.to_type(neighbor_mask, self.float32)
else:
neighbor_mask = self.ones((n_frames, n_beads, n_neighbors),
dtype=self.float32)
return neighbors, neighbor_mask
def _torch_eye(self, n, dtype):
if dtype == torch.bool:
# Only in pytorch>=1.2!
return torch.BoolTensor(np.eye(n, dtype=np.bool_))
else:
return torch.eye(n, dtype=dtype)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # Versatile Methods # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# The methods implemented below should modify the originals as little as
# possible, such that the documentation for the respective method on the
# numpy and pytorch websites should be sufficient.
# Methods defined: arccos, cross, norm, sum, arange, tile, eye, ones,
# to_type, clip, isnan
def arccos(self, x):
if self.method == 'torch':
return torch.acos(x)
elif self.method == 'numpy':
return np.arccos(x)
def arctan(self, x):
if self.method == 'torch':
return torch.atan(x)
elif self.method == 'numpy':
return np.arctan(x)
def cross(self, x, y, axis):
if self.method == 'torch':
return torch.cross(x, y, dim=axis)
elif self.method == 'numpy':
return np.cross(x, y, axis=axis)
def norm(self, x, axis):
if self.method == 'torch':
return torch.norm(x, dim=axis)
elif self.method == 'numpy':
return np.linalg.norm(x, axis=axis)
def sum(self, x, axis):
if self.method == 'torch':
return torch.sum(x, dim=axis)
elif self.method == 'numpy':
return np.sum(x, axis=axis)
def arange(self, n):
if self.method == 'torch':
return torch.arange(n)
elif self.method == 'numpy':
return np.arange(n)
def tile(self, x, shape):
if self.method == | |
button1.bind(on_press=self.crv_callback_click)
button1.bind(on_release=self.crv_callback_reqhomebutton)
self.data.datarecord.setobject('reqhomebutton', button1, 'button')
button1.disabled = True
button2 = MyButton(text='Manage Crew')
button2.bind(on_release=self.crv_callback_managecrew)
button2.bind(on_press=self.crv_callback_click)
self.data.datarecord.setobject('reqmanagecrewbutton', button2, 'button')
button2.disabled = True
button3 = MyButton(text='Manage Locations')
button3.bind(on_press=self.crv_callback_click)
button3.bind(on_release=self.crv_callback_managelocations)
# don't need a disable here - as locations are always available
# also don't need an object as locations are simple
buttonmore = MyButton(text='More Settings')
buttonmore.bind(on_press=self.crv_callback_click)
buttonmore.bind(on_release=self.crv_callback_moresettingsbutton)
footer.add_widget(button1)
footer.add_widget(button2)
footer.add_widget(button3)
footer.add_widget(buttonmore)
box.add_widget(header)
box.add_widget(middle)
box.add_widget(footer)
screen_setting2.add_widget(box)
screen_setting2.bind(on_enter=self.screenmanager_callback_onenter)
return screen_setting2
def crv_callback_lastscreen(self, instance):
if self.data.lastscreen == '':
self.data.lastscreen = 'screen_main'
if not self.data.sm.has_screen(self.data.lastscreen):
self.data.lastscreen = 'screen_main'
self.data.sm.current = self.data.lastscreen
def crv_setup_display_update(self):
Logger.info('CRV:SETUP:update')
'''
This should be a generic screen that is written to when something is happening.
The back button has to be able to return from whence it came.
(i.e. the last screen).
'''
crv_setup_display_update = Screen(name='screen_display_update')
szh = [.5, 1]
box = BoxLayout(orientation='vertical', padding=10)
# header = self.logstart(3, 'screen_display_update')
header = self.screencreateheader('screen_display_update')
middle = MyBoundBox(orientation='vertical', pos_hint_x=0, size_hint_y=.8)
middlecontents = BoxLayout(orientation='vertical')
middletitle = MyLabel(size_hint_y=.2)
self.data.datarecord.setobject('upddisplab', middletitle, 'label')
middlecontents.add_widget(middletitle)
gridlist = GridLayout(cols=1, rows=1, size_hint_x=1, size_hint_y=None)
gridlist.bind(minimum_height=gridlist.setter('height'))
self.data.datarecord.setobject('upddispgrid', gridlist, 'grid')
wdth = box.width
#sv = ScrollView(size_hint=(None,None), size=(400,400))
sv = ScrollView()
sv.add_widget(gridlist)
box1 = BoxLayout(orientation='horizontal')
pb = ProgressBar()
box1.add_widget(pb)
self.data.datarecord.setobject('upddisppb', pb, 'progressbar')
middlecontents.add_widget(sv)
middlecontents.add_widget(box1)
middle.add_widget(middlecontents)
footer = BoxLayout(orientation='horizontal', size_hint_y=.1)
button1 = MyButton(text='Back')
button1.bind(on_release=self.crv_callback_lastscreen)
self.data.datarecord.setobject('upddispback', button1, 'button')
footer.add_widget(button1)
box.add_widget(header)
box.add_widget(middle)
box.add_widget(footer)
crv_setup_display_update.add_widget(box)
# used to display progress when sending emails
self.data.setdisplayaction(self.displayaction)
return crv_setup_display_update
def crv_setup_boatlog_screen(self):
Logger.info('CRV:SETUP:boatlog')
self.boatlog.readlocations()
screen_boatlog = Screen(name='screen_boatlog')
screen_boatlog.bind(on_enter=self.screenmanager_callback_onenter)
# logdict = {'time', 'type', 'from', 'to', 'arrived', 'incident', 'helm', 'nav', 'activity', 'action', 'result'}
# logkeys = ['time', 'type', 'from', 'to', 'LOG']
# loghead = {0: 'Time', 1: 'Type', 2: 'From', 3: 'To', 4: 'LOG'}
szh = [.5, 1]
box = BoxLayout(orientation='vertical', padding=10)
# header = self.logstart(1, 'screen_boatlog') # 1=showtime
header = self.screencreateheader('screen_boatlog')
middle = BoxLayout(orientation='vertical', size_hint_y=.8)
footer = BoxLayout(orientation='horizontal', size_hint=[1, .1])
activity = MyBoundBox(orientation='vertical', size_hint_y=.8)
entrybox = MyBoundBox(orientation='horizontal')
self.data.logrecord.setobject('logentrybox', entrybox, 'box')
entrycommonlog = MyBoundBox(orientation='vertical', size_hint_x=.6)
self.data.logrecord.setobject('logcommonlog', entrycommonlog, 'box')
topentrycommon = MyBoundBox(orientation='horizontal')
botentrycommon = MyBoundBox(orientation='horizontal')
# entryboatlog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypeboat', entryboatlog, 'box')
# topentryboat = MyBoundBox(orientation='horizontal')
# botentryboat = MyBoundBox(orientation='horizontal')
entryincidentlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypeincident', entryincidentlog, 'box')
topentryincident = MyBoundBox(orientation='horizontal')
botentryincident = MyBoundBox(orientation='horizontal')
entrytraininglog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypetraining', entrytraininglog, 'box')
topentrytraining = MyBoundBox(orientation='horizontal')
botentrytraining = MyBoundBox(orientation='horizontal')
# entrymaintlog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypemaintenance', entrymaintlog, 'box')
# topentrymaint = MyBoundBox(orientation='horizontal')
# botentrymaint = MyBoundBox(orientation='horizontal')
# entryprlog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypepr', entryprlog, 'box')
# topentrypr = MyBoundBox(orientation='horizontal')
# botentrypr = MyBoundBox(orientation='horizontal')
# entryfundlog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypefund', entryfundlog, 'box')
# topentryfund = MyBoundBox(orientation='horizontal')
# botentryfund = MyBoundBox(orientation='horizontal')
# entryprevalog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypeprevaction', entryprevalog, 'box')
# topentrypreva = MyBoundBox(orientation='horizontal')
# botentrypreva = MyBoundBox(orientation='horizontal')
# entryeducationlog = MyBoundBox(orientation='vertical')
# self.data.logrecord.setobject('logtypeeducation', entryeducationlog, 'box')
# topentryeducation = MyBoundBox(orientation='horizontal')
# botentryeducation = MyBoundBox(orientation='horizontal')
entryfaultlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypefault', entryfaultlog, 'box')
topentryfault = MyBoundBox(orientation='horizontal')
botentryfault = MyBoundBox(orientation='horizontal')
entrystanddownlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypestanddown', entrystanddownlog, 'box')
topentrystanddown = MyBoundBox(orientation='horizontal')
botentrystanddown = MyBoundBox(orientation='horizontal')
entrylaunchlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypelaunch', entrylaunchlog, 'box')
topentrylaunch = MyBoundBox(orientation='horizontal')
#botentrylaunch = MyBoundBox(orientation='horizontal')
entryloglog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypelog', entryloglog, 'box')
topentrylog = MyBoundBox(orientation='horizontal')
#botentrylog = MyBoundBox(orientation='horizontal')
entryfuellog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypefuel', entryfuellog, 'box')
topentryfuel = MyBoundBox(orientation='horizontal')
botentryfuel = MyBoundBox(orientation='horizontal')
entryonstationlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypeonstation', entryonstationlog, 'box')
topentryonstation = MyBoundBox(orientation='horizontal')
#botentryonstation = MyBoundBox(orientation='horizontal')
entrycrewlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypecrew', entrycrewlog, 'box')
topentryaddcrew = MyBoundBox(orientation='horizontal')
botentryaddcrew = MyBoundBox(orientation='horizontal')
entryarrivelog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypearrive', entryarrivelog, 'box')
topentryarrive = MyBoundBox(orientation='horizontal')
entrydepartlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypedepart', entrydepartlog, 'box')
topentrydepart = MyBoundBox(orientation='horizontal')
entryguestlog = MyBoundBox(orientation='vertical', size_hint_x=.4)
self.data.logrecord.setobject('logtypeguest', entryguestlog, 'box')
topentryguest = MyBoundBox(orientation='horizontal')
botentryguest = MyBoundBox(orientation='horizontal')
buttonbox1 = MyBoundBox(orientation='horizontal', size_hint_y=.4)
entrybox.add_widget(entrycommonlog)
# it's just this single line that has to be changed when the log type changes (default logtype is boat);
# that means we have to save this entryboatlog, entryincidentlog, etc widgets
#entrybox.add_widget(entryboatlog)
entrycommonlog.add_widget(topentrycommon)
entrycommonlog.add_widget(botentrycommon)
# entryboatlog.add_widget(topentryboat)
# entryboatlog.add_widget(botentryboat)
# entryincidentlog.add_widget(topentryincident)
# entryincidentlog.add_widget(botentryincident)
entrytraininglog.add_widget(topentrytraining)
entrytraininglog.add_widget(botentrytraining)
entryfaultlog.add_widget(topentryfault)
entryfaultlog.add_widget(botentryfault)
# entrymaintlog.add_widget(topentrymaint)
# entrymaintlog.add_widget(botentrymaint)
# entryprlog.add_widget(topentrypr)
# entryprlog.add_widget(botentrypr)
# entryfundlog.add_widget(topentryfund)
# entryfundlog.add_widget(botentryfund)
# entryprevalog.add_widget(topentrypreva)
# entryprevalog.add_widget(botentrypreva)
entrystanddownlog.add_widget(topentrystanddown)
entrystanddownlog.add_widget(botentrystanddown)
# entryeducationlog.add_widget(topentryeducation)
# entryeducationlog.add_widget(botentryeducation)
entrycrewlog.add_widget(topentryaddcrew)
entrycrewlog.add_widget(botentryaddcrew)
entrylaunchlog.add_widget(topentrylaunch)
entryloglog.add_widget(topentrylog)
entryfuellog.add_widget(topentryfuel)
entryfuellog.add_widget(botentryfuel)
entryonstationlog.add_widget(topentryonstation)
entryarrivelog.add_widget(topentryarrive)
entrydepartlog.add_widget(topentrydepart)
entryguestlog.add_widget(topentryguest)
entryguestlog.add_widget(botentryguest)
# =========================================================
# Create widgets for common log
# When you select the time widget, set it to current time ONLY if empty
w, x, l = self.boxtextbox('Time', szh, orientation='horizontal')
x.bind(focus=self.act_callback_timefocus)
self.data.logrecord.setobjectplus('logtime', w, x, topentrycommon, 'text')
wloc, xloc, l = self.boxdropdown('Loc', self.boatlog.locations, [.5, 1])
self.data.logrecord.setobjectplus('loglocation', wloc, xloc, topentrycommon, 'drop')
# These options must match the log selection records in data
# (commented as "the different types of logs")
whelm, xhelm, l = self.boxdropdown('Helm', self.crew.posscrewlist, [.5, 1])
self.data.logrecord.setobjectplus('loghelm', whelm, xhelm, botentrycommon, 'drop')
#
wnav, xnav, l = self.boxdropdown('Nav', self.crew.posscrewlist, [.5, 1])
self.data.logrecord.setobjectplus('lognav', wnav, xnav, botentrycommon, 'drop')
disptypes=[]
for n in self.data.logrecord.loggroup['logtypesdisp']:
disptypes.append(n)
disptypes.sort()
wtype, x, l = self.boxdropdown('Type', disptypes, [.5, 1], readonly=True)
#x.readonly = True
x.postcall = self.changelogbytype
self.data.logrecord.setobjectplus('logtype', wtype, x, botentrycommon, 'drop')
# =========================================================
# =========================================================
# create widgets specific to incident log
incdisptypes=[]
for n in self.data.logrecord.loggroup['logtypesincdisp']:
incdisptypes.append(n)
incdisptypes.sort()
wactivity, x, l = self.boxdropdown('Activity', incdisptypes, szh, readonly=True)
#x.readonly = True
x.postcall = self.changeactionbytype
self.data.logrecord.setobjectplus('logincidentactivity', wactivity, x, topentryincident, 'drop')
#w, x, l = self.boxtextbox('Notes', szh)
#self.data.logrecord.setobjectplus('logincidentnotes', w, x, botentryincident, 'text')
# --------------- create specific for incident action onscene
# 1. checkbox: SAP, checkbox ExitStrategy
# 2. text: notes
entryincactonscene = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeinconscene', entryincactonscene, 'box')
topentryinconscene = MyBoundBox(orientation='horizontal')
botentryinconscene = MyBoundBox(orientation='horizontal')
entryincactonscene.add_widget(topentryinconscene)
entryincactonscene.add_widget(botentryinconscene)
w, c, l = self.boxcheckbox('SAP', [1.2, 1])
self.data.logrecord.setobjectlist('loginconssap', w, [c, l], topentryinconscene, 'checkbox')
w, c, l = self.boxcheckbox('Exit Strategy', [1.2, 1])
self.data.logrecord.setobjectlist('loginconsexits', w, [c, l], topentryinconscene, 'checkbox')
w, x, l = self.boxtextbox('Notes', [.2, 1])
self.data.logrecord.setobjectplus('loginconsnotes', w, x, botentryinconscene, 'text')
# --------------- create specific for incident action tow
# 1. text: taken to time: arrived
# 2. text: notes check:dropped
entryincacttow = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeinctow', entryincacttow, 'box')
topentryinctow = MyBoundBox(orientation='horizontal')
botentryinctow = MyBoundBox(orientation='horizontal')
entryincacttow.add_widget(topentryinctow)
entryincacttow.add_widget(botentryinctow)
w, x, l = self.boxtextbox('Taken to', [.4, 1])
self.data.logrecord.setobjectplus('loginctowtakento', w, x, topentryinctow, 'text')
w, x = self.boxtimebox('Arrived', [.4, 1])
x.bind(focus=self.data.settimeonobject)
self.data.logrecord.setobjectplus('loginctowarrived', w, x, topentryinctow, 'text')
w, x, l = self.boxtextbox('Notes', [.4, 1])
self.data.logrecord.setobjectplus('loginctownotes', w, x, botentryinctow, 'text')
w, c, l = self.boxcheckbox('Tow Dropped', [1.2, 1])
self.data.logrecord.setobjectlist('loginctowdropped', w, [c, l], botentryinctow, 'checkbox')
# --------------- create specific for incident action shadow
# 1. text: shadow to time:stopped shadow
# 2. text: notes
entryincactshadow = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincshadow', entryincactshadow, 'box')
topentryincshadow = MyBoundBox(orientation='horizontal')
botentryincshadow = MyBoundBox(orientation='horizontal')
entryincactshadow.add_widget(topentryincshadow)
entryincactshadow.add_widget(botentryincshadow)
w, x, l = self.boxtextbox('Shadow to', [.5, 1])
self.data.logrecord.setobjectplus('logincshadowto', w, x, topentryincshadow, 'text')
w, x = self.boxtimebox('Abandoned', [.5, 1])
self.data.logrecord.setobjectplus('logincshadowleft', w, x, topentryincshadow, 'text')
w, x, l = self.boxtextbox('Notes', [.5, 1])
self.data.logrecord.setobjectplus('logincshadownotes', w, x, botentryincshadow, 'text')
# --------------- create specific for incident action finish
# 1. text: taken to
# 2. text: notes
entryincactfinish = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincfinish', entryincactfinish, 'box')
topentryincfinish = MyBoundBox(orientation='horizontal')
botentryincfinish = MyBoundBox(orientation='horizontal')
entryincactfinish.add_widget(topentryincfinish)
entryincactfinish.add_widget(botentryincfinish)
# --------------- create specific for incident action medical
# 1. status
# 2. text: notes
entryincactmedical = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincmedical', entryincactmedical, 'box')
topentryincmedical = MyBoundBox(orientation='horizontal')
botentryincmedical = MyBoundBox(orientation='horizontal')
entryincactmedical.add_widget(topentryincmedical)
entryincactmedical.add_widget(botentryincmedical)
w, x, l = self.boxdropdown('Status',
['0', '1.Critical', '2.Serious', '3.Moderate', '4.Minor'], szh=[.5,1], readonly=True)
#w.readonly = True
self.data.logrecord.setobjectplus('logincmedicalstatus', w, x, topentryincmedical, 'drop')
w, x, l = self.boxtextbox('Treatment/Notes', szh=[.5, 1])
self.data.logrecord.setobjectplus('logincmedicalnotes', w, x, botentryincmedical, 'text')
# --------------- create specific for incident action mechanical
# 1. drop:Battery|Motor|Oil|Fuel|Other checkbox: started
# 2. notes
entryincactmechanical = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincmechanical', entryincactmechanical, 'box')
topentryincmechanical = MyBoundBox(orientation='horizontal')
botentryincmechanical = MyBoundBox(orientation='horizontal')
entryincactmechanical.add_widget(topentryincmechanical)
entryincactmechanical.add_widget(botentryincmechanical)
w, x, l = self.boxdropdown('Reason',
['Battery', 'Motor', 'Fuel', 'Hull', 'Other'], szh, readonly=True)
#w.readonly = True
self.data.logrecord.setobjectplus('logincmechanicalreason', w, x, topentryincmechanical, 'drop')
w, c, l = self.boxcheckbox('Started', [.5, 1])
self.data.logrecord.setobjectlist('logincmechanicalstarted', w, [c, l], topentryincmechanical, 'checkbox')
w, x, l = self.boxtextbox('Notes', [.2, 1])
self.data.logrecord.setobjectplus('logincmechanicalnotes', w, x, botentryincmechanical, 'text')
# --------------- create specific for incident action sinking
# 1. notes
entryincactsinking = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincsinking', entryincactsinking, 'box')
topentryincsinking = MyBoundBox(orientation='horizontal')
botentryincsinking = MyBoundBox(orientation='horizontal')
entryincactsinking.add_widget(topentryincsinking)
#entryincactsinking.add_widget(botentryincsinking)
w, x, l = self.boxtextbox('Notes', szh=[1,.3], multiline=True, orientation='vertical')
self.data.logrecord.setobjectplus('logincsinkingnotes', w, x, topentryincsinking, 'text')
# --------------- create specific for incident action aground
# 1. notes
entryincactaground = MyBoundBox(orientation='vertical')
self.data.logrecord.setobject('logtypeincaground', entryincactaground, 'box')
topentryincaground = MyBoundBox(orientation='horizontal')
#botentryincaground = MyBoundBox(orientation='horizontal')
entryincactaground.add_widget(topentryincaground)
#entryincactaground.add_widget(botentryincaground)
w, x, l = self.boxtextbox('Notes', szh=[1,.3], multiline=True, orientation='vertical')
self.data.logrecord.setobjectplus('logincagroundnotes', w, x, topentryincaground, 'text')
# --------------- create specific | |
# coding: utf-8
from __future__ import absolute_import, print_function
""" Interface with Spectrum Investigation """
__author__ = "<NAME> <<EMAIL>>"
import logging
import multiprocessing as mp
import numpy as np
import os
import re
import shutil
import scipy.optimize as op
from random import choice
from signal import alarm, signal, SIGALRM, SIGKILL
from string import ascii_letters
from subprocess import PIPE, Popen
from time import time
from textwrap import dedent
from oracle.si import io, utils
logger = logging.getLogger("oracle")
class SIException(Exception):
pass
class instance(object):
# It is assumed that the setup.py installer has placed si_lineform on your $PATH
_executable = "si_lineform"
_acceptable_return_codes = (0, )
def __init__(self, twd_base_dir="/tmp", prefix="si", chars=10, debug=False,
**kwargs):
"""
A context manager for interfacing with SI.
:param twd_base_dir: [optional]
Base directory for temporary working directory required by SI.
:type twd_base_dir: str
:param prefix: [optional]
Filename prefix to use for temporary files.
:type prefix: str
:param chars: [optional]
Number of random characters to use for temporary file names.
:type chars: int
:param debug: [optional]
If SI raises an exception, keep temporary files and re-raise the
exception.
:type debug: bool
"""
if prefix is None:
prefix = ""
self.debug = debug
self.chars = chars
self.prefix = prefix
self.twd_base_dir = twd_base_dir
if not os.path.exists(self.twd_base_dir):
os.mkdir(self.twd_base_dir)
# Dragons ahead.
if "transition" in kwargs:
self._transition = kwargs["transition"]
def __enter__(self):
# Create a temporary working directory.
self.twd = os.path.join(self.twd_base_dir, self.prefix \
+ "".join([choice(ascii_letters) for _ in xrange(self.chars)]))
while os.path.exists(self.twd):
self.twd = os.path.join(self.twd_base_dir, self.prefix \
+ "".join([choice(ascii_letters) for _ in xrange(self.chars)]))
os.mkdir(self.twd)
logging.debug("Temporary working directory: {0}".format(self.twd))
# Link the line list.
if hasattr(self, "_transition"):
io.write_line_list(os.path.join(self.twd, "linedata.dat"),
np.array([self._transition]), mode="b")
else:
siu_line_list = os.path.abspath(os.path.join(os.path.dirname(
os.path.expanduser(__file__)), "linedata/master_line.dat"))
os.symlink(siu_line_list, os.path.join(self.twd, "linedata.dat"))
return self
def execute(self, filename="fort.10", timeout=60, shell=False, env=None):
"""
Execute SI with the input filename.
:param filename: [optional]
The input filename to execute with SI.
:type filename: str
:param timeout: [optional]
Number of seconds to wait for output before timing out.
:type timeout: int
:param shell: [optional]
Execute the command in shell.
:type shell: bool
:param env: [optional]
Environment variables to pass through to the SI environment.
:type env: dict
"""
logger.debug("Executing input file: {0}".format(
os.path.join(self.twd, filename)))
# Last chance to make sure a line list exists.
if not os.path.exists(os.path.join(self.twd, "linedata.dat")):
# Link the line list.
logger.debug("Last-minute inclusion of default line list.")
siu_line_list = os.path.abspath(os.path.join(os.path.dirname(
os.path.expanduser(__file__)), "linedata/master_line.dat"))
os.symlink(siu_line_list, os.path.join(self.twd, "linedata.dat"))
class Alarm(Exception):
pass
def alarm_handler(signum, frame):
raise Alarm
default_env = {}
default_env.update(os.environ.copy())
# We will make this relative to __file__ until we can remove
# all instances of SIU_MAIN in the fortran code itself.
default_env["SIU_MAIN"] = os.path.abspath(
os.path.join(os.path.dirname(os.path.expanduser(__file__))))
if env is not None:
default_env.update(env)
t = time()
p = Popen([self._executable],
shell=shell, bufsize=2056, cwd=self.twd, stdin=PIPE, stdout=PIPE,
stderr=PIPE, env=default_env, close_fds=True)
logger.debug("Subprocess call took {0:.3f} s".format(time() - t))
if timeout != -1:
signal(SIGALRM, alarm_handler)
alarm(timeout)
try:
pipe_input = "\n" if -6 in self._acceptable_return_codes else ""
stdout, stderr = p.communicate(input=pipe_input)
logger.debug("SI stdout (code {0}): {1}".format(p.returncode, stdout))
if timeout != -1:
alarm(0)
except Alarm:
# Process might have died before getting to this line so wrap it to
# avoid "OSError: no such process"
try:
os.kill(p.pid, SIGKILL)
except OSError:
pass
return (-9, "", "The process was killed due to timeout.")
if p.returncode not in self._acceptable_return_codes:
logger.warn("SI returned the following error (code {0:d}):".format(
p.returncode))
logger.warn(stdout)
raise SIException(stderr)
return (p.returncode, stdout, stderr)
def equivalent_width(self, teff, logg, metallicity, xi, transition,
wavelength_steps=(0.10, 0.005, 0.15), abundances=None, lte=True,
full_output=False):
"""
Return the equivalent width of the given ``transition``, evaluated near its
rest wavelength, for the given stellar parameters and abundances.
:param teff:
Effective temperature of the model atmosphere (Kelvin).
:type teff:
float
:param logg:
Surface gravity of the model atmosphere.
:type logg:
float
:param metallicity:
Mean metallicity of the model atmosphere.
:type metallicity:
float
:param xi:
Microturbulence in the model atmosphere (km/s).
:type xi:
float
:param transition:
A record containing the wavelength, species, and atomic data for the
transition in question.
:type transition:
:class:`numpy.core.records.record`
:param wavelength_steps: [optional]
The optimal, minimum and maximum wavelength step to synthesise
(Angstroms).
:type wavelength_steps:
tuple
:param abundances: [optional]
The abundances (values) of different elements (keys).
:type abundances:
dict
:param lte: [optional]
Employ the approximation of local thermodynamic equilibrium.
:type lte:
bool
:param full_output: [optional]
Return the synthesised spectra and the SI output in addition to the
equivalent width. If ``True``, the return format is ``equivalent_width``,
``synthetic_spectra``, ``stdout``.
:type full_output:
bool
:returns:
The integrated equivalent width of the transition.
:rtype:
float
"""
# Remove any old output files so that we don't accidentally confuse
# ourselves.
filenames = ("fort.14", "fort.16",)# "linedata.dat")
for filename in filenames:
try:
os.remove(os.path.join(self.twd, filename))
except OSError:
continue
# Write the new line list.
io.write_line_list(os.path.join(self.twd, "linedata.dat"),
np.array([transition]), mode="b", clobber=True)
# Write the new input file.
with open(os.path.join(self.twd, "fort.10"), "w+") as fp:
# SI requires the minimum step width to be in mA, and the maximum
# step width to be in Angstroms. Because that makes perfect sense.
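# Worked example of the unit conversion (illustrative only): with the default
# wavelength_steps=(0.10, 0.005, 0.15), the template below receives
# min_wl_step = 0.005 * 1000 = 5.0 (mA), max_wl_step = 0.150 (Angstroms), and
# opt_wl_step = 0.100 (Angstroms).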
fp.write(dedent("""
{teff:.0f}
{logg:.3f}
{metallicity:+.3f}
{xi:.3f}
{rest_wavelength:.3f} {species:.3f}
{min_wl_step:.1f} {max_wl_step:.3f}
{si_bit:.0f}
{opt_wl_step:.3f}
""".format(teff=teff, logg=logg, metallicity=metallicity, xi=xi,
rest_wavelength=transition["wavelength"],
species=transition["atomic_number"] + 0.10 * (transition["ionised"] - 1),
min_wl_step=wavelength_steps[1] * 1000.,
max_wl_step=wavelength_steps[2],
opt_wl_step=wavelength_steps[0],
si_bit=8421423 if lte else 26247215)).lstrip())
if abundances is None:
abundances = {}
fp.write("{0:.0f}\n".format(len(abundances)))
for element, abundance in abundances.iteritems():
fp.write("{atomic_number:.0f} {abundance:.3f}\n".format(
atomic_number=utils.atomic_number(element),
abundance=abundance))
# Execute it.
returncode, stdout, stderr = self.execute()
try:
wavelength, lower_excitation_potential, equivalent_width = \
np.loadtxt(os.path.join(self.twd, "fort.16"),
usecols=(0, 2, 6, )).flatten()
except IOError:
raise SIException("no equivalent width found in {0}".format(
os.path.join(self.twd, "fort.16")))
# Grab the synthetic spectra too.
try:
synthetic_spectra = np.loadtxt(os.path.join(self.twd, "fort.14"))
except IOError:
raise SIException("no synthetic spectra found in {0}".format(
os.path.join(self.twd, "fort.14")))
if len(synthetic_spectra) == 0:
raise SIException("no synthetic spectra found in {0}".format(
os.path.join(self.twd, "fort.14")))
if full_output:
return (equivalent_width, synthetic_spectra, stdout)
return equivalent_width
@utils.rounder(None, 0, 3, 3, 3, 3, 3)
@utils.lru_cache(maxsize=128, typed=False)
def synthesise_transition(self, teff, logg, metallicity, xi, abundance,
surrounding=2, wavelength_steps=(0.10, 0.005, 1.5), full_output=False):
"""
Synthesise spectrum surrounding a single transition for a given effective
temperature, surface gravity, abundances and microturbulence. Note that
this method assumes that the line list has already been set up for this
instance.
:param teff:
Effective temperature of the model atmosphere (Kelvin).
:type teff:
float
:param logg:
Surface gravity of the model atmosphere.
:type logg:
float
:param metallicity:
Mean metallicity of the model atmosphere.
:type metallicity:
float
:param xi:
Microturbulence in the model atmosphere (km/s).
:type xi:
float
:param abundance:
The abundance for the transition.
:type abundance:
float
:param surrounding:
The amount of spectrum to synthesise surrounding the line (either side).
:type surrounding:
float
:param wavelength_steps: [optional]
The optimal, minimum and maximum wavelength step to synthesise
(Angstroms).
:type wavelength_steps:
tuple
:param full_output: [optional]
Return the SI output as well as the synthesised spectra.
:type full_output:
bool
:returns:
A synthesised spectrum of the present line. If ``full_output`` is True,
then the spectrum, equivalent width, and SI standard output are returned.
"""
try:
# transition information should be in this instance.
assert self._transition, "No transition information in instance. "\
"Do you know what you're doing?"
except (AttributeError, AssertionError):
pass
transition = self._transition
region = (
transition["wavelength"] - surrounding,
transition["wavelength"] + surrounding
)
# Synthesise
spectrum, stdout = self.synthesise(teff, logg, metallicity, xi, region,
wavelength_steps,
abundances={utils.element(transition["atomic_number"]): abundance},
full_output=True)
if not np.any(spectrum[:, 1] < 1.):
logger.warn("No line center synthesised for transition at {0:.2f} A"\
" with [{1}/H] = {2:.2f}. Line is either too weak or not in the"\
" provided line list.".format(transition["wavelength"],
utils.element(transition["atomic_number"]), abundance))
equivalent_width = 0
else:
stdout_split = stdout.split("\n")
for line in stdout_split[::-1]:
if line.startswith(" equivalent width: "):
equivalent_width = float(line.strip().split()[2])
if not np.isfinite(equivalent_width):
equivalent_width = 0
break
else:
raise ValueError("No equivalent width found for transition at"\
" {0:.2f} Angstroms".format(transition["wavelength"]))
if full_output:
return (spectrum, equivalent_width, stdout)
return spectrum
def synthesise(self, teff, logg, metallicity, xi, wavelengths,
wavelength_steps=(0.10, 0.005, 1.5), abundances=None, line_list_filename=None,
full_output=False):
"""
Synthesise spectra for a given effective temperature, surface gravity,
metallicity, microturbulence, and abundances.
:param teff:
Effective temperature of the model atmosphere (Kelvin).
:type teff:
float
:param logg:
Surface gravity of the model atmosphere.
:type logg:
float
:param metallicity:
Mean metallicity of the model atmosphere.
from visupport import *
class BaseModule:
c = np.cast[theano.config.floatX](- 0.5 * np.log(2 * np.pi))
def __init__(self):
# initialise internal energies for
# [0]: time-dependent states
# [1]: parameters
# [2]: hyperparameters
self.L = [0., 0., 0.]
self._extra_args_ = tuple()
def on_energy(self):
pass
def add_energy(self, hdm):
hdm.L[0] = hdm.L[0] + self.L[0]
hdm.L[1] = hdm.L[1] + self.L[1]
hdm.L[2] = hdm.L[2] + self.L[2]
def declare_variable(self, _type_, _name_, *args):
var = _type_(_name_, *args)
self._extra_args_ += var,
return var
# ----------------------------------------------------------------- #
# ----------------------------------------------------------------- #
# this will inject prior expectation (of v) to hdm top
class MasterModule(BaseModule):
# dimension of state prior expectation e
prior_dim = None
def __init__(self, hdm, **opts):
super().__init__()
self.EOV = hdm.EMBED_ORDER_V
self.request_state_variable(hdm, **opts)
self.on_state_variable(**opts)
self.register_state_variable(hdm)
self.request_output(hdm)
self.on_state_prediction()
self.on_state_error()
self.register_prediction(hdm)
self.on_energy()
self.add_energy(hdm)
def request_state_variable(self, hdm, **opts):
edim = self.prior_dim
N = hdm.RANK - 1
assert edim == hdm.vdim[N], (
"improper Master module, "
"dimension mismatch: "
"dim(prior exptn)={} but"
"dim(v)={}.".format(edim, hdm.vdim[N]))
eslice, e, E = hdm.new_e(edim)
self.eslice = eslice
self.e = e
self.E = E
def on_state_variable(self, **opts):
pass
def register_state_variable(self, hdm):
edim = self.prior_dim
hdm.reg_e(edim, self.eslice, self.e, self.E, *self._extra_args_)
def request_output(self, hdm):
# use top causal state as output
# this should have an order of EOV
N = hdm.RANK - 1
self.OUTPUT = hdm.Vc[N]
def on_state_prediction(self):
# simply use E[i] as prediction (prior expectation)
# this can also be embedded time series (when using DEM)
self.Epredict = [self.E[i] for i in range(self.EOV)]
self.epredict = T.join(0, *self.Epredict)
def on_state_error(self):
self.Eerror = [self.OUTPUT[i] - self.Epredict[i] for i in range(self.EOV)]
for i in range(self.EOV):
self.Eerror[i].name = 'PREDICT ERR /e [m=M,d={}]'.format(i)
self.eerror = T.join(0, *self.Eerror)
self.eerror.name = 'PREDICT ERR /e [m=M]'
def register_prediction(self, hdm):
hdm.reg_vpe(self.epredict, self.eerror)
class GaussianMasterModule(MasterModule):
def __init__(self, hdm, **opts):
super().__init__(hdm, **opts)
def on_state_variable(self, **opts):
edim = self.prior_dim
self.erough = self.declare_variable(
sym_shasf,
'ROUGHNESS /e [m=M; isca] (default=0.5)',
0.5)
self.eprec_val = self.declare_variable(
sym_shasf,
'PRECISION /e [m=M; isca] (log-default=2)',
2)
self.eprecision = sym_pemb(
T.eye(edim) * T.exp(self.eprec_val),
self.EOV,
self.erough)
def on_energy(self):
edim = self.prior_dim
self.L[0] = (
-0.5 * self.eerror.dot(self.eprecision).dot(self.eerror) +
0.5 * sym_lndet(self.eprecision) +
np.cast[theano.config.floatX](self.c * edim * self.EOV))
self.L[0].name = "L(t)[v/prior]"
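# Note (added for clarity): the expression above appears to be the log-density
# of a zero-mean multivariate Gaussian with precision `eprecision` evaluated at
# the prediction error, i.e.
#   L = -1/2 * e' P e + 1/2 * ln|P| - (D/2) * ln(2*pi),
# where D = prior_dim * EOV and the constant term comes from c = -ln(2*pi)/2.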
class UnivariateGMModule(GaussianMasterModule):
prior_dim = 1
# ----------------------------------------------------------------- #
# ----------------------------------------------------------------- #
class Module(BaseModule):
# dimension of hidden (x) and causal (v) states
states_dim = (None, None)
# dimension of parameters (P) and hyperparameters (H)
params_dim = (None, None)
# dimension of connecting modules (v[m-1] or y)
output_dim = None
def __init__(self, hdm,
fG, fF, **opts):
super().__init__()
self.EOX = hdm.EMBED_ORDER_X
self.EOV = hdm.EMBED_ORDER_V
# self.xdim = xdim # dimension of hidden state
# self.vdim = vdim # dimension of causal state
# self.Psize = Pdim # size/dimension of parameter
# self.Hsize = Hdim # size/dimension of hyperparameter
self.xslice = None # slice index of hdm.u
self.x = None # a slice of hdm.u according to xslice
self.X = None # x arranged into its motion in ascending order
self.vslice = None # slice index of hdm.u
self.v = None # a slice of hdm.u accourding to vslice
self.V = None # v arranged into its motion in ascending order
self.Pslice = None # slice index of hdm.P
self.P = None # a slice of hdm.P according to Pslice
self.p = None # a slice of hdm.p according to Pslice (prior expectation of P)
self.Hslice = None # slice index of hdm.H
self.H = None # a slice of hdm.H according to Hslice
self.h = None # a slice of hdm.h according to Hslice (prior expectation of H)
self.initialise_module(hdm, fG, fF, **opts)
def evaluate_state_equation(self, f, order=0):
return f(self.X[order], self.V[order], self.P)
def initialise_module(self, hdm, fG, fF, **opts):
self.request_hdm_level(hdm, self.output_dim)
self.check_equation(fG)
self.check_equation(fF)
self.request_output(hdm)
self.on_output(**opts)
self.request_state_variable(hdm, **opts)
self.on_state_variable(**opts)
self.register_state_variable(hdm)
self.request_cause_variable(hdm, **opts)
self.on_cause_variable(**opts)
self.register_cause_variable(hdm)
self.request_parameter(hdm, **opts)
self.on_parameter(**opts)
self.register_parameter(hdm)
self.request_hyperparameter(hdm, **opts)
self.on_hyperparameter(**opts)
self.register_hyperparameter(hdm)
self.on_state_prediction(fG, fF)
self.on_state_error()
self.register_state_info(hdm)
self.on_param_error()
self.on_hyparam_error()
self.on_energy() # ***
self.add_energy(hdm)
def request_hdm_level(self, hdm, odim):
self.N = hdm._TOPUP_(odim)
def request_output(self, hdm):
self.OUTPUT = hdm.output
def request_state_variable(self, hdm, **opts):
# request or slice state variables from HDM
# default behaviour is to make slices
xdim = self.states_dim[0]
xslice, x, X = hdm.new_x(xdim)
self.xslice = xslice
self.x = x
self.X = X
def on_state_variable(self, **opts):
pass
def register_state_variable(self, hdm):
xdim = self.states_dim[0]
hdm.reg_x(xdim, self.xslice, self.x, self.X)
def request_cause_variable(self, hdm, **opts):
vdim = self.states_dim[1]
vslice, v, V = hdm.new_v(vdim)
self.vslice = vslice
self.v = v
self.V = V
def on_cause_variable(self, **opts):
pass
def register_cause_variable(self, hdm):
vdim = self.states_dim[1]
hdm.reg_v(vdim, self.vslice, self.v, self.V)
def request_parameter(self, hdm, **opts):
Pdim = self.params_dim[0]
Pslice, P, p = hdm.new_P(Pdim)
self.Pslice = Pslice
self.P = P # parameters (unknown)
self.p = p # prior expectation of parameters (given)
def on_parameter(self, **opts):
pass
def register_parameter(self, hdm):
Pdim = self.params_dim[0]
hdm.reg_P(Pdim, self.P, self.p, self.Pslice, *self._extra_args_)
def request_hyperparameter(self, hdm, **opts):
Hdim = self.params_dim[1]
Hslice, H, h = hdm.new_H(Hdim)
self.Hslice = Hslice
self.H = H # hyperparameters (unknown)
self.h = h # prior expectation of hyperparameters (given)
def on_hyperparameter(self, **opts):
raise NotImplementedError
def register_hyperparameter(self, hdm):
Hdim = self.params_dim[1]
hdm.reg_H(Hdim, self.H, self.h, self.Hslice, *self._extra_args_)
def on_output(self, **opts):
pass
def on_state_prediction(self, fG, fF):
EOX = self.EOX
EOV = self.EOV
G = self.evaluate_state_equation(fG)
F = self.evaluate_state_equation(fF)
self.Vpredict = [
T.Rop(G, self.X[0], self.X[i]) + T.Rop(G, self.V[0], self.V[i])
if i < EOV else
T.Rop(G, self.X[0], self.X[i])
for i in range(EOX)]
self.Xpredict = [
T.Rop(F, self.X[0], self.X[i]) + T.Rop(F, self.V[0], self.V[i])
if i < EOV else
T.Rop(F, self.X[0], self.X[i])
for i in range(EOX)]
for i in range(EOX):
self.Vpredict[i] = T.cast(self.Vpredict[i], theano.config.floatX)
self.Vpredict[i].name = 'PREDICTION /v [m={},d={}]'.format(self.N, i)
for i in range(EOX):
self.Xpredict[i] = T.cast(self.Xpredict[i], theano.config.floatX)
self.Xpredict[i].name = 'PREDICTION /x [m={},n={}]'.format(self.N, i)
self.vpredict = T.join(0, *self.Vpredict)
self.vpredict.name = 'PREDICTION /v [m={}]'.format(self.N)
self.xpredict = T.join(0, *self.Xpredict)
self.xpredict.name = 'PREDICTION /x [m={}]'.format(self.N)
def on_state_error(self):
EOX = self.EOX
self.Verror = [self.OUTPUT[i] - self.Vpredict[i]
for i in range(EOX)]
self.Xerror = [self.X[i + 1] - self.Xpredict[i]
if i < EOX - 1 else
-self.Xpredict[i] for i in range(EOX)]
for i in range(EOX):
self.Verror[i].name = 'PREDICT ERR /v [m={},d={}]'.format(self.N, i)
for i in range(EOX):
self.Xerror[i].name = 'PREDICT ERR /x [m={},n={}]'.format(self.N, i)
self.verror = T.join(0, *self.Verror)
self.verror.name = 'PREDICT ERR /v [m={}]'.format(self.N)
self.xerror = T.join(0, *self.Xerror)
self.xerror.name = 'PREDICT ERR /x [m={}]'.format(self.N)
# self.uerror = T.join(0, self.xerror, self.verror)
# self.uerror.name = 'ERROR/u [m={}]'.format(self.N)
def register_state_info(self, hdm):
hdm.reg_xpe(self.xpredict, self.xerror)
hdm.reg_vpe(self.vpredict, self.verror)
def on_param_error(self):
self.perror = self.P - self.p
def on_hyparam_error(self):
self.herror = self.H - self.h
def set_head(self, hdm):
hdm.__set_head(self.HEAD)
def check_equation(self, eqn):
pass
class ActiveModule(Module):
states_dim = (None, None)
params_dim = (None, None)
action_dim = None
output_dim = None
def __init__(self, hdm, fG, fF, **opts):
self.aslice = None
self.a = None
self.A = None
super().__init__(hdm, fG, fF, **opts)
def evaluate_state_equation(self, f, order=0):
return f(self.A[order], self.X[order], self.V[order], self.P)
def initialise_module(self, hdm, fG, fF, **opts):
self.request_hdm_level(hdm, self.output_dim)
self.check_equation(fG)
self.check_equation(fF)
self.request_output(hdm)
self.on_output(**opts)
self.request_state_variable(hdm, **opts)
self.on_state_variable(**opts)
self.register_state_variable(hdm)
self.request_cause_variable(hdm, **opts)
self.on_cause_variable(**opts)
self.register_cause_variable(hdm)
self.request_action_variable(hdm, **opts)
self.on_action_variable(**opts)
self.register_action_variable(hdm)
self.request_parameter(hdm, **opts)
self.on_parameter(**opts)
self.register_parameter(hdm)
self.request_hyperparameter(hdm, **opts)
self.on_hyperparameter(**opts)
self.register_hyperparameter(hdm)
self.on_state_prediction(fG, fF)
self.on_state_error()
self.register_state_info(hdm)
self.on_param_error()
self.on_hyparam_error()
self.on_energy()
self.add_energy(hdm)
def request_action_variable(self, hdm, **opts):
adim = self.action_dim
aslice, a, A = hdm.new_a(adim)
self.aslice = aslice
self.a = a
self.A = A
def on_action_variable(self, **opts):
pass
def register_action_variable(self, hdm):
adim = self.action_dim
hdm.reg_a(adim, self.aslice, self.a, self.A)
# ----------------------------------------------------------------- #
# ----------------------------------------------------------------- #
class GaussianModule(Module):
def __init__(self, hdm, fG, fF, **opts):
super().__init__(hdm, fG, fF, **opts)
def on_parameter(self, **opts):
Pdim = self.params_dim[0]
# prior precision matrix using default log-precision
self.pprec_val = self.declare_variable(
sym_shasf,
'PRIOR PRECSN/P [m={}; fsca] (log-default=1)'.format(self.N),
1)
self.pprecision = T.eye(Pdim) * T.exp(self.pprec_val)
def on_hyperparameter(self, **opts):
xdim, vdim = self.states_dim
Pdim, Hdim = self.params_dim
# prior precision matrix using default log-precision
self.hprec_val = self.declare_variable(
sym_shasf,
'PRIOR PRECSN/H [m={}; fsca] (log-default=1)'.format(self.N),
1)
self.hprecision = T.eye(Hdim) * T.exp(self.hprec_val)
# declare precision components Qx & Qv
# each column of Qx/Qv is a vectorised covariance component
self.Qx = self.declare_variable(
sym_shamf,
'PRECN COMPNT/x [m={}; fmat]'.format(self.N))
self.Qv = self.declare_variable(
sym_shamf,
'PRECN COMPNT/v [m={}; fmat]'.format(self.N - 1))
nQx = self.Qx.shape[1]
nQv = self.Qv.shape[1]
Px = self.Qx * T.exp(self.H[:nQx])
Px = Px.sum(axis=1).reshape((xdim, xdim))
Pv = self.Qv * T.exp(self.H[nQx:nQx + nQv])
Pv = Pv.sum(axis=1).reshape((vdim, vdim))
| |
from __future__ import print_function
import cookielib
import hashlib
import hmac
import HTMLParser
import json
import os
import re
import socket
import sqlite3
import struct
import sys
import textwrap
import time
import urllib
import urlparse
# framework libs
from recon.core import framework
#=================================================
# MODULE CLASS
#=================================================
class BaseModule(framework.Framework):
def __init__(self, params, query=None):
framework.Framework.__init__(self, params)
self.options = framework.Options()
# register a data source option if a default query is specified in the module
if self.meta.get('query'):
self._default_source = self.meta.get('query')
self.register_option('source', 'default', True, 'source of input (see \'show info\' for details)')
# register all other specified options
if self.meta.get('options'):
for option in self.meta.get('options'):
self.register_option(*option)
# register any required keys
if self.meta.get('required_keys'):
self.keys = {}
for key in self.meta.get('required_keys'):
# add key to the database
self._query_keys('INSERT OR IGNORE INTO keys (name) VALUES (?)', (key,))
# migrate the old key if needed
self._migrate_key(key)
# add key to local keys dictionary
# could fail to load on exception here to prevent loading modules
# without required keys, but would need to do it in a separate loop
# so that all keys get added to the database first. for now, the
# framework will warn users of the missing key, but allow the module
# to load.
self.keys[key] = self.get_key(key)
if not self.keys.get(key):
self.error('\'%s\' key not set. %s module will likely fail at runtime. See \'keys add\'.' % (key, self._modulename.split('/')[-1]))
self._reload = 0
#==================================================
# SUPPORT METHODS
#==================================================
def _migrate_key(self, key):
'''migrate key from old .dat file'''
key_path = os.path.join(self._home, 'keys.dat')
if os.path.exists(key_path):
try:
key_data = json.loads(open(key_path, 'rb').read())
if key_data.get(key):
self.add_key(key, key_data.get(key))
except:
self.error('Corrupt key file. Manual migration of \'%s\' required.' % (key))
def ascii_sanitize(self, s):
return ''.join([char for char in s if ord(char) in [10,13] + range(32, 126)])
def html_unescape(self, s):
'''Unescapes HTML markup and returns an unescaped string.'''
h = HTMLParser.HTMLParser()
return h.unescape(s)
#p = htmllib.HTMLParser(None)
#p.save_bgn()
#p.feed(s)
#return p.save_end()
def html_escape(self, s):
escapes = {
'&': '&',
'"': '"',
"'": ''',
'>': '>',
'<': '<',
}
return ''.join(escapes.get(c,c) for c in s)
def cidr_to_list(self, string):
# references:
# http://boubakr92.wordpress.com/2012/12/20/convert-cidr-into-ip-range-with-python/
# http://stackoverflow.com/questions/8338655/how-to-get-list-of-ip-addresses
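# Illustrative example (not from the original source): a /30 network expands
# to its four addresses, e.g.
#   cidr_to_list('192.168.1.0/30')
#   -> ['192.168.1.0', '192.168.1.1', '192.168.1.2', '192.168.1.3']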
# parse address and cidr
(addrString, cidrString) = string.split('/')
# split address into octets and convert cidr to int
addr = addrString.split('.')
cidr = int(cidrString)
# initialize the netmask and calculate based on cidr mask
mask = [0, 0, 0, 0]
for i in range(cidr):
mask[i/8] = mask[i/8] + (1 << (7 - i % 8))
# initialize net and binary and netmask with addr to get network
net = []
for i in range(4):
net.append(int(addr[i]) & mask[i])
# duplicate net into broad array, gather host bits, and generate broadcast
broad = list(net)
brange = 32 - cidr
for i in range(brange):
broad[3 - i/8] = broad[3 - i/8] + (1 << (i % 8))
# print information, mapping integer lists to strings for easy printing
#mask = '.'.join(map(str, mask))
net = '.'.join(map(str, net))
broad = '.'.join(map(str, broad))
ips = []
f = struct.unpack('!I',socket.inet_pton(socket.AF_INET,net))[0]
l = struct.unpack('!I',socket.inet_pton(socket.AF_INET,broad))[0]
while f <= l:
ips.append(socket.inet_ntop(socket.AF_INET,struct.pack('!I',f)))
f = f + 1
return ips
def parse_name(self, name):
elements = [self.html_unescape(x) for x in name.strip().split()]
# remove prefixes and suffixes
names = []
for i in range(0,len(elements)):
# preserve initials
if re.search(r'^\w\.$', elements[i]):
elements[i] = elements[i][:-1]
# remove unnecessary prefixes and suffixes
elif re.search(r'(?:\.|^the$|^jr$|^sr$|^I{2,3}$)', elements[i], re.IGNORECASE):
continue
names.append(elements[i])
# make sense of the remaining elements
if len(names) > 3:
names[2:] = [' '.join(names[2:])]
# clean up any remaining garbage characters
names = [re.sub(r"[,']", '', x) for x in names]
# set values and return names
fname = names[0] if len(names) >= 1 else None
mname = names[1] if len(names) >= 3 else None
lname = names[-1] if len(names) >= 2 else None
return fname, mname, lname
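# Usage sketch (hypothetical input, added for clarity): prefixes/suffixes that
# contain a period are dropped and single-letter initials are preserved, so
#   parse_name('Dr. John Q. Public Jr.') -> ('John', 'Q', 'Public')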
def hosts_to_domains(self, hosts, exclusions=[]):
domains = []
for host in hosts:
elements = host.split('.')
# recursively walk through the elements
# extracting all possible (sub)domains
while len(elements) >= 2:
# account for domains stored as hosts
if len(elements) == 2:
domain = '.'.join(elements)
else:
# drop the host element
domain = '.'.join(elements[1:])
if domain not in domains + exclusions:
domains.append(domain)
del elements[0]
return domains
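# Usage sketch (hypothetical input, added for clarity): every (sub)domain of at
# least two labels is extracted once, e.g.
#   hosts_to_domains(['www.sub.example.com'])
#   -> ['sub.example.com', 'example.com']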
#==================================================
# OPTIONS METHODS
#==================================================
def _get_source(self, params, query=None):
prefix = params.split()[0].lower()
if prefix in ['query', 'default']:
query = ' '.join(params.split()[1:]) if prefix == 'query' else query
try: results = self.query(query)
except sqlite3.OperationalError as e:
raise framework.FrameworkException('Invalid source query. %s %s' % (type(e).__name__, e.message))
if not results:
sources = []
elif len(results[0]) > 1:
sources = [x[:len(x)] for x in results]
#raise framework.FrameworkException('Too many columns of data as source input.')
else:
sources = [x[0] for x in results]
elif os.path.exists(params):
sources = open(params).read().split()
else:
sources = [params]
source = [self.to_unicode(x) for x in sources]
if not source:
raise framework.FrameworkException('Source contains no input.')
return source
#==================================================
# 3RD PARTY API METHODS
#==================================================
def get_explicit_oauth_token(self, resource, scope, authorize_url, access_url):
token_name = resource+'_token'
token = self.get_key(token_name)
if token:
return token
import urllib
import webbrowser
import socket
client_id = self.get_key(resource+'_api')
client_secret = self.get_key(resource+'_secret')
port = 31337
redirect_uri = 'http://localhost:%d' % (port)
payload = {'response_type': 'code', 'client_id': client_id, 'scope': scope, 'state': self.get_random_str(40), 'redirect_uri': redirect_uri}
authorize_url = '%s?%s' % (authorize_url, urllib.urlencode(payload))
w = webbrowser.get()
w.open(authorize_url)
# open a socket to receive the access token callback
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('127.0.0.1', port))
sock.listen(1)
conn, addr = sock.accept()
data = conn.recv(1024)
conn.sendall('HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n<html><head><title>Recon-ng</title></head><body>Response received. Return to Recon-ng.</body></html>')
conn.close()
# process the received data
if 'error_description' in data:
self.error(urllib.unquote_plus(re.search('error_description=([^\s&]*)', data).group(1)))
return None
authorization_code = re.search('code=([^\s&]*)', data).group(1)
payload = {'grant_type': 'authorization_code', 'code': authorization_code, 'redirect_uri': redirect_uri, 'client_id': client_id, 'client_secret': client_secret}
resp = self.request(access_url, method='POST', payload=payload)
if 'error' in resp.json:
self.error(resp.json['error_description'])
return None
access_token = resp.json['access_token']
self.add_key(token_name, access_token)
return access_token
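# Usage sketch (hypothetical endpoints, added for clarity): the method opens the
# provider's authorize URL in a browser, captures the redirect to
# http://localhost:31337 to read the authorization code, exchanges it at
# `access_url` for an access token, and caches the token under '<resource>_token'.
#   token = self.get_explicit_oauth_token(
#       'example', 'basic',
#       'https://example.com/oauth/authorize', 'https://example.com/oauth/token')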
def get_twitter_oauth_token(self):
token_name = 'twitter_token'
token = self.get_key(token_name)
if token:
return token
twitter_key = self.get_key('twitter_api')
twitter_secret = self.get_key('twitter_secret')
url = 'https://api.twitter.com/oauth2/token'
auth = (twitter_key, twitter_secret)
headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
payload = {'grant_type': 'client_credentials'}
resp = self.request(url, method='POST', auth=auth, headers=headers, payload=payload)
if 'errors' in resp.json:
raise framework.FrameworkException('%s, %s' % (resp.json['errors'][0]['message'], resp.json['errors'][0]['label']))
access_token = resp.json['access_token']
self.add_key(token_name, access_token)
return access_token
def build_pwnedlist_payload(self, payload, method, key, secret):
timestamp = int(time.time())
payload['ts'] = timestamp
payload['key'] = key
msg = '%s%s%s%s' % (key, timestamp, method, secret)
hm = hmac.new(secret.encode('utf-8'), msg, hashlib.sha1)
payload['hmac'] = hm.hexdigest()
return payload
def get_pwnedlist_leak(self, leak_id):
# check if the leak has already been retrieved
leak = self.query('SELECT * FROM leaks WHERE leak_id=?', (leak_id,))
if leak:
leak = dict(zip([x[0] for x in self.get_columns('leaks')], leak[0]))
del leak['module']
return leak
# set up the API call
key = self.get_key('pwnedlist_api')
secret = self.get_key('pwnedlist_secret')
url = 'https://api.pwnedlist.com/api/1/leaks/info'
base_payload = {'leakId': leak_id}
payload = self.build_pwnedlist_payload(base_payload, 'leaks.info', key, secret)
# make the request
resp = self.request(url, payload=payload)
if resp.status_code != 200:
self.error('Error retrieving leak data.\n%s' % (resp.text))
return
leak = resp.json['leaks'][0]
# normalize the leak for storage
normalized_leak = {}
for item in leak:
value = leak[item]
if type(value) == list:
value = ', '.join(value)
normalized_leak[item] = value
return normalized_leak
def search_twitter_api(self, payload, limit=False):
headers = {'Authorization': 'Bearer %s' % (self.get_twitter_oauth_token())}
url = 'https://api.twitter.com/1.1/search/tweets.json'
results = []
while True:
resp = self.request(url, payload=payload, headers=headers)
if limit:
# app auth rate limit for search/tweets is 450/15min
time.sleep(2)
jsonobj = resp.json
for item in ['error', 'errors']:
if item in jsonobj:
raise framework.FrameworkException(jsonobj[item])
results += jsonobj['statuses']
if 'next_results' in jsonobj['search_metadata']:
max_id = urlparse.parse_qs(jsonobj['search_metadata']['next_results'][1:])['max_id'][0]
payload['max_id'] = max_id
continue
break
return results
def search_shodan_api(self, query, limit=0):
api_key = self.get_key('shodan_api')
url = 'https://api.shodan.io/shodan/host/search'
payload = {'query': query, 'key': api_key}
results = []
cnt = 0
page = 1
self.verbose('Searching Shodan API for: %s' % (query))
while True:
resp = self.request(url, payload=payload)
if resp.json == None:
raise framework.FrameworkException('Invalid JSON response.\n%s' % (resp.text))
if 'error' in resp.json:
raise framework.FrameworkException(resp.json['error'])
if not resp.json['matches']:
break
# add new results
results.extend(resp.json['matches'])
# increment and check the limit
cnt += 1
if limit == cnt:
break
# next page
page += 1
payload['page'] = page
return results
def search_bing_api(self, query, limit=0):
url = 'https://api.cognitive.microsoft.com/bing/v5.0/search'
payload = {'q': query, 'count': 50, 'offset': 0, 'responseFilter': 'WebPages'}
headers = {'Ocp-Apim-Subscription-Key': self.get_key('bing_api')}
results = []
| |
= self.at_directive_kind_pattern.match(s2)
if m:
word = m.group(1)
if word not in g.globalDirectiveList:
return at.noDirective
s3 = s2[m.end(1):]
if s3 and s3[0] in ".(":
return at.noDirective
return at.miscDirective
return at.noDirective
#@+node:ekr.20041005105605.200: *5* at.isSectionName
# returns (flag, end). end is the index of the character after the section name.
def isSectionName(self, s, i):
# 2013/08/01: bug fix: allow leading periods.
while i < len(s) and s[i] == '.':
i += 1
if not g.match(s, i, "<<"):
return False, -1
i = g.find_on_line(s, i, ">>")
if i > -1:
return True, i + 2
return False, -1
#@+node:ekr.20190113043601.1: *5* at.open/closeOutputFile
def openOutputFile(self):
'''Open the output file, which must be file-like'''
at = self
at.outputFile = g.FileLikeObject()
# Can't be inited in initWriteIvars because not valid in @shadow logic.
if g.app.unitTesting:
at.output_newline = '\n'
def closeOutputFile(self):
'''Close the output file, returning its contents.'''
at = self
at.outputFile.flush()
contents = g.toUnicode('' if at.errors else at.outputFile.get())
at.outputFile.close()
at.outputFile = None
return contents
#@+node:ekr.20190109145850.1: *5* at.open/closeOutputStream
# open/close methods used by top-level atFile.write logic.
def openOutputStream(self):
'''Open the output stream, which a list, *not* a file-like object.'''
at = self
at.outputList = []
# Can't be inited in initWriteIvars because not valid in @shadow logic.
if g.app.unitTesting:
at.output_newline = '\n'
def closeOutputStream(self):
'''Close the output stream, returning its contents.'''
at = self
contents = '' if at.errors else ''.join(at.outputList)
at.outputList = []
return contents
#@+node:ekr.20041005105605.201: *5* at.os and allies
# Note: self.outputFile may be either a FileLikeObject or a real file.
#@+node:ekr.20041005105605.202: *6* at.oblank, oblanks & otabs
def oblank(self):
self.os(' ')
def oblanks(self, n):
self.os(' ' * abs(n))
def otabs(self, n):
self.os('\t' * abs(n))
#@+node:ekr.20041005105605.203: *6* at.onl & onl_sent
def onl(self):
"""Write a newline to the output stream."""
self.os('\n') # **not** self.output_newline
def onl_sent(self):
"""Write a newline to the output stream, provided we are outputting sentinels."""
if self.sentinels:
self.onl()
#@+node:ekr.20041005105605.204: *6* at.os
def os(self, s):
"""
Write a string to the output file or stream.
All output produced by leoAtFile module goes here.
"""
at = self
if s.startswith(self.underindentEscapeString):
try:
junk, s = at.parseUnderindentTag(s)
except Exception:
at.exception("exception writing:" + s)
return
s = g.toUnicode(s, at.encoding)
at.outputList.append(s)
#@+node:ekr.20041005105605.205: *5* at.outputStringWithLineEndings
def outputStringWithLineEndings(self, s):
'''
Write the string s as-is except that we replace '\n' with the proper line ending.
Calling self.onl() runs afoul of queued newlines.
'''
at = self
s = g.toUnicode(s, at.encoding)
s = s.replace('\n', at.output_newline)
self.os(s)
#@+node:ekr.20190111045822.1: *5* at.precheck
def precheck(self, fileName, root):
'''
Check for dangerous writes.
Return False if the user declines to do the write.
'''
at = self
if not at.shouldPromptForDangerousWrite(fileName, root):
# Fix bug 889175: Remember the full fileName.
at.rememberReadPath(fileName, root)
return True
# Prompt if the write would overwrite the existing file.
ok = self.promptForDangerousWrite(fileName, kind='@asis')
if ok:
# Fix bug 889175: Remember the full fileName.
at.rememberReadPath(fileName, root)
return True
# Fix #1031: do not add @ignore here!
g.es("not written:", fileName)
return False
#@+node:ekr.20050506090446.1: *5* at.putAtFirstLines
def putAtFirstLines(self, s):
'''Write any @first lines from string s.
These lines are converted to @verbatim lines,
so the read logic simply ignores lines preceding the @+leo sentinel.'''
at = self; tag = "@first"
i = 0
while g.match(s, i, tag):
i += len(tag)
i = g.skip_ws(s, i)
j = i
i = g.skip_to_end_of_line(s, i)
# Write @first line, whether empty or not
line = s[j: i]
at.os(line); at.onl()
i = g.skip_nl(s, i)
#@+node:ekr.20050506090955: *5* at.putAtLastLines
def putAtLastLines(self, s):
'''Write any @last lines from string s.
These lines are converted to @verbatim lines,
so the read logic simply ignores lines following the @-leo sentinel.'''
at = self; tag = "@last"
# Use g.splitLines to preserve trailing newlines.
lines = g.splitLines(s)
n = len(lines); j = k = n - 1
# Scan backwards for @last directives.
while j >= 0:
line = lines[j]
if g.match(line, 0, tag): j -= 1
elif not line.strip():
j -= 1
else: break
# Write the @last lines.
for line in lines[j + 1: k + 1]:
if g.match(line, 0, tag):
i = len(tag); i = g.skip_ws(line, i)
at.os(line[i:])
#@+node:ekr.20041005105605.206: *5* at.putDirective 4.x & helper
def putDirective(self, s, i):
r'''
Output a sentinel a directive or reference s.
It is important for PHP and other situations that \@first and \@last
directives get translated to verbatim lines that do *not* include what
follows the @first & @last directives.
'''
at = self
k = i
j = g.skip_to_end_of_line(s, i)
directive = s[i: j]
if g.match_word(s, k, "@delims"):
at.putDelims(directive, s, k)
elif g.match_word(s, k, "@language"):
self.putSentinel("@" + directive)
elif g.match_word(s, k, "@comment"):
self.putSentinel("@" + directive)
elif g.match_word(s, k, "@last"):
self.putSentinel("@@last")
# Convert to an verbatim line _without_ anything else.
elif g.match_word(s, k, "@first"):
self.putSentinel("@@first")
# Convert to an verbatim line _without_ anything else.
else:
self.putSentinel("@" + directive)
i = g.skip_line(s, k)
return i
#@+node:ekr.20041005105605.207: *6* at.putDelims
def putDelims(self, directive, s, k):
'''Put an @delims directive.'''
at = self
# Put a space to protect the last delim.
at.putSentinel(directive + " ") # 10/23/02: put @delims, not @@delims
# Skip the keyword and whitespace.
j = i = g.skip_ws(s, k + len("@delims"))
# Get the first delim.
while i < len(s) and not g.is_ws(s[i]) and not g.is_nl(s, i):
i += 1
if j < i:
at.startSentinelComment = s[j: i]
# Get the optional second delim.
j = i = g.skip_ws(s, i)
while i < len(s) and not g.is_ws(s[i]) and not g.is_nl(s, i):
i += 1
at.endSentinelComment = s[j: i] if j < i else ""
else:
at.writeError("Bad @delims directive")
#@+node:ekr.20041005105605.210: *5* at.putIndent
def putIndent(self, n, s=''):
"""Put tabs and spaces corresponding to n spaces,
assuming that we are at the start of a line.
Remove extra blanks if the line starts with the underindentEscapeString"""
tag = self.underindentEscapeString
if s.startswith(tag):
n2, s2 = self.parseUnderindentTag(s)
if n2 >= n:
return
if n > 0: n -= n2
else: n += n2
if n > 0:
w = self.tab_width
if w > 1:
q, r = divmod(n, w)
self.otabs(q)
self.oblanks(r)
else:
self.oblanks(n)
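# Worked example (added for clarity): with tab_width w = 4 and n = 10 leading
# spaces, divmod(10, 4) == (2, 2), so two tabs followed by two blanks are
# emitted; a leading underindentEscapeString first reduces n accordingly.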
#@+node:ekr.20041005105605.211: *5* at.putInitialComment
def putInitialComment(self):
c = self.c
s2 = c.config.output_initial_comment
if s2:
lines = s2.split("\\n")
for line in lines:
line = line.replace("@date", time.asctime())
if line:
self.putSentinel("@comment " + line)
#@+node:ekr.20190111172114.1: *5* at.replaceFile & helpers
def replaceFile(self, contents, encoding, fileName, root, ignoreBlankLines=False):
'''
Write or create the given file from the contents.
Return True if the original file was changed.
'''
at, c = self, self.c
if root:
root.clearDirty()
#
# Create the timestamp (only for messages).
if c.config.getBool('log-show-save-time', default=False):
format = c.config.getString('log-timestamp-format') or "%H:%M:%S"
timestamp = time.strftime(format) + ' '
else:
timestamp = ''
#
# Adjust the contents.
assert g.isUnicode(contents), g.callers()
if at.output_newline != '\n':
contents = contents.replace('\r', '').replace('\n', at.output_newline)
#
# If file does not exist, create it from the contents.
fileName = g.os_path_realpath(fileName)
sfn = g.shortFileName(fileName)
if not g.os_path_exists(fileName):
ok = g.writeFile(contents, encoding, fileName)
if ok:
c.setFileTimeStamp(fileName)
if not g.unitTesting:
g.es('%screated: %s' % (timestamp, fileName))
if root:
# Fix bug 889175: Remember the full fileName.
at.rememberReadPath(fileName, root)
else:
at.addToOrphanList(root)
# No original file to change. Return value tested by a unit test.
at.checkPythonCode(contents, fileName, root)
return False # No change to original file.
#
# Compare the old and new contents.
old_contents = g.readFileIntoUnicodeString(fileName,
encoding=at.encoding, silent=True)
unchanged = (
contents == old_contents or
(not at.explicitLineEnding and at.compareIgnoringLineEndings(old_contents, contents)) or
ignoreBlankLines and at.compareIgnoringBlankLines(old_contents, contents))
if unchanged:
at.sameFiles += 1
if not g.unitTesting and c.config.getBool('report-unchanged-files', default=True):
g.es('%sunchanged: %s' % (timestamp, sfn))
# Leo 5.6: Check unchanged files.
at.checkPythonCode(contents, fileName, root, pyflakes_errors_only=True)
return False # No change to original file.
#
# Warn if we are only adjusting the line endings.
if at.explicitLineEnding:
ok = (
at.compareIgnoringLineEndings(old_contents, contents) or
ignoreBlankLines and at.compareIgnoringBlankLines(old_contents, contents))
if not ok:
g.warning("correcting line endings in:", fileName)
#
# Write a changed file.
ok = g.writeFile(contents, encoding, fileName)
if ok:
c.setFileTimeStamp(fileName)
if not g.unitTesting:
g.es('%swrote: %s' %
max(out_in)
_in_out = min(in_out)
if _out_in < _in_out:
return _out_in, _in_out
else:
return None
def create_maze(shape):
# Random Maze Generator using Depth-first Search
# http://en.wikipedia.org/wiki/Maze_generation_algorithm
# FB - 20121214
my, mx = shape
maze = np.zeros(shape)
dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)]
# start the maze from a random cell
stack = [(np.random.randint(0, my), np.random.randint(0, mx))]
while len(stack) > 0:
(cy, cx) = stack[-1]
maze[cy, cx] = 1
# find a new cell to add
nlst = [] # list of available neighbors
for i, (dy, dx) in enumerate(dirs):
ny = cy + dy
nx = cx + dx
if ny >= 0 and ny < my and nx >= 0 and nx < mx:
if maze[ny, nx] == 0:
# number of occupied neighbors must be 1
ctr = 0
for _dy, _dx in dirs:
ex = nx + _dx
ey = ny + _dy
if ex >= 0 and ex < mx and ey >= 0 and ey < my:
if maze[ey, ex] == 1:
ctr += 1
if ctr == 1:
nlst.append(i)
# if 1 or more neighbors available then randomly select one and move
if len(nlst) > 0:
ir = np.random.choice(nlst)
dy, dx = dirs[ir]
cy += dy
cx += dx
stack.append((cy, cx))
else:
stack.pop()
return maze
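# Usage sketch (illustrative, not part of the original module):
#   maze = create_maze((15, 15))
#   # `maze` is a (15, 15) array of 0/1 where 1 marks a carved passage cell.
#   print("carved %d of %d cells" % (int(maze.sum()), maze.size))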
def header(message, n, char, nl=True):
assert isinstance(char, str)
banner = char * n
newline = "\n" if nl else ""
return "{}{} {} {}{}".format(newline, banner, message.strip(), banner, newline)
def print_header(message, n, char, nl=True):
print(header(message, n, char, nl))
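# Worked example (added for clarity):
#   header("Results", 3, "*") == "\n*** Results ***\n"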
def exactly_2d(x, return_leading_shape=False):
leading_shape = x.shape[:-1]
if return_leading_shape:
return leading_shape, x.reshape(-1, x.shape[-1])
else:
return x.reshape(-1, x.shape[-1])
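# Worked example (added for clarity): for x with shape (2, 3, 4),
#   exactly_2d(x).shape == (6, 4)
#   exactly_2d(x, return_leading_shape=True) -> ((2, 3), array of shape (6, 4))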
def generate_perlin_noise_2d(shape, res, normalize=False):
""" each dim of shape must be divisible by corresponding dim of res
from https://pvigier.github.io/2018/06/13/perlin-noise-numpy.html
"""
def f(t):
return 6*t**5 - 15*t**4 + 10*t**3
delta = (res[0] / shape[0], res[1] / shape[1])
d = (shape[0] // res[0], shape[1] // res[1])
grid = np.mgrid[0:res[0]:delta[0], 0:res[1]:delta[1]].transpose(1, 2, 0) % 1
# Gradients
angles = 2*np.pi*np.random.rand(res[0]+1, res[1]+1)
gradients = np.dstack((np.cos(angles), np.sin(angles)))
g00 = gradients[0:-1, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g10 = gradients[1:, 0:-1].repeat(d[0], 0).repeat(d[1], 1)
g01 = gradients[0:-1, 1:].repeat(d[0], 0).repeat(d[1], 1)
g11 = gradients[1:, 1:].repeat(d[0], 0).repeat(d[1], 1)
# Ramps
n00 = np.sum(grid * g00, 2)
n10 = np.sum(np.dstack((grid[:, :, 0]-1, grid[:, :, 1])) * g10, 2)
n01 = np.sum(np.dstack((grid[:, :, 0], grid[:, :, 1]-1)) * g01, 2)
n11 = np.sum(np.dstack((grid[:, :, 0]-1, grid[:, :, 1]-1)) * g11, 2)
# Interpolation
t = f(grid)
n0 = n00*(1-t[:, :, 0]) + t[:, :, 0]*n10
n1 = n01*(1-t[:, :, 0]) + t[:, :, 0]*n11
result = np.sqrt(2)*((1-t[:, :, 1])*n0 + t[:, :, 1]*n1)
if normalize:
result -= result.min()
mx = result.max()
if mx >= 1e-6:
result /= mx
return result
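# Usage sketch (illustrative, not part of the original module): both dims of
# `shape` must be divisible by the matching dim of `res`, e.g.
#   noise = generate_perlin_noise_2d((128, 128), (4, 4), normalize=True)
#   # `noise` is a (128, 128) float array scaled into [0, 1].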
def prime_factors(n):
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
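# Worked example (added for clarity): trial division yields factors with
# multiplicity, e.g. prime_factors(360) == [2, 2, 2, 3, 3, 5].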
@contextmanager
def numpy_print_options(**print_options):
old_print_options = np.get_printoptions()
try:
np.set_printoptions(**print_options)
yield
finally:
np.set_printoptions(**old_print_options)
def annotate_with_rectangles(ax, annotations, colors=None, lw=1):
if colors is None:
colors = list(plt.get_cmap('Dark2').colors)
for valid, _, _id, t, b, l, r in annotations:
if not valid:
continue
h = b - t
w = r - l
color = colors[int(_id) % len(colors)]
rect = patches.Rectangle(
(l, t), w, h, clip_on=False, linewidth=lw, edgecolor=color, facecolor='none')
ax.add_patch(rect)
def animate(
*images, labels=None, interval=500,
path=None, block_shape=None, annotations=None, fig_unit_size=1,
text=None, text_loc=None, fontsize='x-small', text_color='black', normalize=None,
**kwargs):
""" Assumes each element of `images` has shape (batch_size, n_frames, H, W, C). Each element is a set of images.
`annotations` only implemented for the first set of images.
`block_shape` controls the shape of the set of plots for an individual example.
`text` should be an array of strings with shape (batch_size, n_frames)
or a dictionary where the keys are indices and each value is an array of strings with shape (batch_size, n_frames).
In the former case, text plotted only on the first image of each set. In the later case, the keys say
which plot within the block to put the text on.
`text_loc` specifies where to draw the text.
`normalize` can be a list of length len(other_images) + 1. Each entry should be a bool saying
whether the corresponding data should be normalized (over the whole video) or not.
"""
n_image_sets = len(images)
B, T = images[0].shape[:2]
if block_shape is None:
N = n_image_sets
sqrt_N = int(np.ceil(np.sqrt(N)))
m = int(np.ceil(N / sqrt_N))
block_shape = (m, sqrt_N)
images = [
img[..., 0] if img.ndim == 5 and img.shape[-1] == 1 else img
for img in images]
assert np.prod(block_shape) >= n_image_sets
fig, axes = square_subplots(B, block_shape=block_shape, fig_unit_size=fig_unit_size)
time_text = fig.text(0.01, .99, 't=0', ha='left', va='top', transform=fig.transFigure, fontsize=12)
plots = np.zeros_like(axes)
text_elements = np.zeros_like(axes)
if text is None:
text = {}
elif not isinstance(text, dict):
text = {0: text}
if text_loc is None:
text_loc = (0.05, 0.95)
if labels is not None:
for j in range(n_image_sets):
axes[0, j].set_title(str(labels[j]))
for ax in axes.flatten():
set_axis_off(ax)
for i in range(B):
for j in range(n_image_sets):
ax = axes[i, j]
_normalize = False
if normalize is not None:
_normalize = normalize[j]
# A note on vmin/vmax: vmin and vmax are set permanently when imshow is called.
# They are not modified when you call set_array.
if _normalize:
vmin = images[j][i].min()
vmax = images[j][i].max()
mean = images[j][i].mean()
ax.set_ylabel('min={:.3f}, mean={:.3f}, max={:.3f}'.format(vmin, mean, vmax))
else:
vmin = 0.0
vmax = 1.0
plots[i, j] = ax.imshow(images[j][i, 0], vmin=vmin, vmax=vmax)
text_elements[i, j] = ax.text(
*text_loc, '', ha='left', va='top', transform=ax.transAxes, fontsize=fontsize, color=text_color)
plt.subplots_adjust(top=0.95, bottom=0.02, left=0.02, right=.98, wspace=0.1, hspace=0.1)
def func(t):
time_text.set_text('t={}'.format(t))
for i in range(B):
for j in range(n_image_sets):
plots[i, j].set_array(images[j][i, t])
ax = axes[i, j]
for obj in ax.findobj(match=plt.Rectangle):
try:
obj.remove()
except NotImplementedError:
pass
if j in text:
text_elements[i, j].set_text(text[j][i, t])
if annotations is not None:
ax = axes[i, 0]
annotate_with_rectangles(ax, annotations[i][t])
anim = animation.FuncAnimation(fig, func, frames=T, interval=interval)
if path is not None:
if not path.endswith('.mp4'):
path = path + '.mp4'
anim.save(path, writer='ffmpeg', codec='hevc', extra_args=['-preset', 'ultrafast'])
return fig, axes, anim, path
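# Illustrative usage sketch for animate, following the docstring above: two sets of
# grayscale videos shaped (batch_size, n_frames, H, W). No `path` is passed here, since
# writing an .mp4 would additionally require ffmpeg.
def _demo_animate():
    videos = np.random.rand(2, 8, 32, 32)
    dimmed = 0.5 * videos
    fig, axes, anim, path = animate(videos, dimmed, labels=["raw", "dimmed"], interval=200)
    return fig, anim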
def add_rect(ax, top, left, height, width, color, lw=2, **kwargs):
kwargs.update(linewidth=lw)
if 'facecolor' not in kwargs:
kwargs['facecolor'] = 'none'
rect = patches.Rectangle(
(left, top), width, height, edgecolor=color, **kwargs)
ax.add_patch(rect)
def add_dotted_rect(ax, top, left, height, width, c1, c2, **kwargs):
if 'ls' in kwargs:
del kwargs['ls']
if 'linestyle' in kwargs:
        del kwargs['linestyle']
add_rect(ax, top, left, height, width, c1, ls='-', **kwargs)
add_rect(ax, top, left, height, width, c2, ls=':', **kwargs)
def square_subplots(N, block_shape=None, fig_unit_size=1, axes_off=False, **kwargs):
w = int(np.ceil(np.sqrt(N)))
h = int(np.ceil(N / w))
if block_shape is None:
block_shape = (1, 1)
axes_shape = (h*block_shape[0], w*block_shape[1])
if 'figsize' not in kwargs:
# figsize is (width, height)
kwargs['figsize'] = (
axes_shape[1] * fig_unit_size,
axes_shape[0] * fig_unit_size
)
fig, axes = plt.subplots(*axes_shape, **kwargs)
axes = np.array(axes).reshape(*axes_shape)
    _axes = np.zeros((w*h, int(np.prod(block_shape))), dtype=object)
for i in range(w*h):
_h = i // w
_w = i % w
_axes[i, :] = axes[
_h * block_shape[0]: (_h+1) * block_shape[0],
_w * block_shape[1]: (_w+1) * block_shape[1]
].flatten()
axes = np.array(_axes)
if axes_off:
for ax in axes.flatten():
set_axis_off(ax)
return fig, axes
def grid_subplots(h, w, fig_unit_size, axes_off=False):
try:
fig_unit_size = int(fig_unit_size)
unit_size_h = unit_size_w = fig_unit_size
except Exception:
unit_size_h, unit_size_w = fig_unit_size
fig, axes = plt.subplots(h, w, figsize=(w * unit_size_w, h * unit_size_h))
    axes = np.array(axes).reshape(h, w)  # to fix the inconsistent way axes is returned if h == 1 or w == 1
if axes_off:
for ax in axes.flatten():
set_axis_off(ax)
return fig, axes
def set_axis_off(ax, remove_border=False):
""" Differs from ax.set_axis_off() in that axis labels are still shown. """
if remove_border:
for spine in ax.spines.values():
spine.set_visible(False)
ax.set_xticklabels('')
ax.set_yticklabels('')
ax.set_xticks([])
ax.set_yticks([])
def nvidia_smi(robust=True):
try:
p = subprocess.run("nvidia-smi".split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.decode()
except Exception as e:
if robust:
return "Exception while calling nvidia-smi: {}".format(e)
else:
raise
# Different versions of nvidia-smi use different headers and require different functions for parsing processes.
_nvidia_smi_table_end = "+-----------------------------------------------------------------------------+"
_nvidia_smi_no_processes_found = "| No running processes found |"
def _nvidia_smi_parse_processes(s):
lines = s.split('\n')
cuda_version = float(lines[2].split()[-2])
if cuda_version >= 11:
processes_header_line = "| ID ID Usage |"
def parse_process_line(tokens):
"""
Example line:
| |
options: minimize, method='COBYLA'
Nmax = 1000 # Max No. of iterations
ctol = 1.0e-12 # Constraint absolute tolerance.
rhobeg = 0.002 # Reasonable initial changes to the variables.
elif method == 'SLSQP':
# Optimiser options: minimize, method='SLSQP'
Nmax = 1000 # Max No. of iterations
ftol = 1.0e-12 # Precision goal for the value of f in the stopping criterion.
eps = 1.0e-7 # Step size used for numerical approximation of the Jacobian.
else:
print('Optimiser method not recognised:',method)
print('Select one of:',*["'{0}'".format(m) for m in opt_methods])
exit(1)
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#******************************************************************************
#******************************************************************************
# --- END OF USER INPUT SECTIONS ---
# ANY EDITS MADE BEYOND THIS POINT MAY AFFECT THE OPERATION OF THE SOFTWARE
#******************************************************************************
#******************************************************************************
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#******************************************************************************
#******************************************************************************
# Read in free-space loss data from CSV file
#******************************************************************************
#******************************************************************************
# Read from a local file and skip first line and take data only from the
# specified column. The free space loss should be arranged in terms of time
# slots, t.
cvs = np.loadtxt(join(loss_path, loss_file), delimiter= ',', skiprows=1,
usecols=(0, 1, lc-1,))
# Free space loss in dB (to be converted to efficiency)
# Returns the (t,) array FSeff, where t is the total number of time-slots
FSeff = cvs[:,2]
# Find the time slot at the centre of the pass where t = 0.
time0pos = np.where(cvs[:,0] == 0)[0][0]
time0elev = cvs[time0pos,1] # Elevation angle at t = 0 (rads).
time0shift = time0pos # Take a temporary copy to use later.
# Nominal system loss: based on zenith coupling efficiency and nominal losses
sysLoss = -10*(np.log10(FSeff[time0pos]) + np.log10(eta))
# Maximum elevation angle (degs) of satellite pass
#max_elev = np.degrees(cvs[time0pos,1])
max_elev = time0elev
# Keep elevation shift within bounds
#shift_elev0 = min(90.0,max(0.0,shift_elev0))
# Check that elevation shift angle specified is valid
if (shift_elev0 < 0.0 or shift_elev0 >= 90.0):
print('Error! Shift elevation angle for t = 0 out of bounds:',shift_elev0)
print('Angle should be >= 0 and < 90 degrees.')
exit(1)
if (shift_elev0 != 0.0):
# Shift the elevation angle taken as t = 0 away from elev = 90 deg.
# Find the first array index for an elevation angle greater than, or equal
# to, the shifted angle requested.
time0pos = np.where(cvs[:,1] >= (time0elev - np.radians(shift_elev0)))[0][0]
time0elev = cvs[time0pos,1] # New elevation angle at t = 0 (rads).
time0shift = abs(time0pos - time0shift) # Shift in time slots between old and new t = 0.
else:
# No shift requested, t = 0 is at elev = 90 deg.
time0shift = 0 # Reset this value to zero.
#******************************************************************************
#******************************************************************************
# Parameter checks
#******************************************************************************
#******************************************************************************
# Flag that controls if any data files are to be written
tWriteFiles = False
if any([tFullData,tOptiData,tMultiOpt,tMetrics]):
tWriteFiles = True
if tOptimise:
# Sanity check on parameter bounds
xb[xb < 0] = 0 # All bounds must be positive
xb[xb > 1] = 1 # All bounds are less than or equal to 1
if errcorrFunc in ['None','none']:
tCompareEC = False # Don't perform the same calculation twice
# Check that minimum elevation angle specified is valid
if (min_elev < 0.0 or min_elev > 90):
print('Error! Minimum elevation angle out of bounds:',min_elev)
print('Angle should be between 0 and 90 degrees.')
exit(1)
# Find first dt value corresponding to an elevation greater than min_elev
minElevpos = np.where(cvs[:,1] >= np.radians(min_elev))[0][0] # Index of first value
dt_elev = cvs[minElevpos,0] # Max value of dt less than, or equal to, the
# minimum elevation angle
# Check dt_range is within bounds
dt_max = int(0.5*(len(FSeff) - 1) - time0shift) # Maximum time window half-width
dt_range[dt_range < 0] = 0 # All values must be positive
dt_range[dt_range > dt_max] = dt_max # Max cannot exceed No. of time-slots
dt_range[dt_range > dt_elev] = dt_elev # Limit range by minimum elevation value
# Get minimum elevation for transmission (possibly greater than value specified)
minElevpos = np.where(cvs[:,0] <= dt_range[1])[0][0] # Index of first value
min_elev = np.degrees(cvs[minElevpos,1]) # Minimum elevation (degs)
# Ensure asymptotic case uses correct error estimation function, etc.
if boundFunc in ['Asymptotic','asymptotic']:
Npulse = Rrate / NoPass # Rescale No of pulses to per-pass
NoPass = 1 # Actually infinite but set to unity
errcorrFunc = 'block' # Asymptotic error function
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#******************************************************************************
#******************************************************************************
# Set some machine dependent constants
#******************************************************************************
#******************************************************************************
from sys import float_info
# Extract the smallest float that the current system can:
num_min = float_info.epsilon # round (relative error due to rounding)
#num_min = float_info.min # represent
# Extract the largest float that the current system can represent
num_max = float_info.max
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#******************************************************************************
#******************************************************************************
# Sub-functions used to determine the secure key length and to process data
#******************************************************************************
#******************************************************************************
def x0_rand(mu3,xb,num_min):
"""
Randomly initialise the 5 protocol parameters using the specified bounds.
Parameters and bounds should be specified in the order {Px,pk1,pk2,mu1,mu2}.
Parameters
----------
mu3 : float
Intensity of pulse 3 (vacuum).
xb : float, array-like
Upper and lower bounds for the protocol parameters. (5,2)
num_min : float
An arbitrarily small number.
Returns
-------
x0 : float, array
Randomly initialised protocol parameters.
"""
Px_i = np.random.rand() * (xb[0,1] - xb[0,0] - 2*num_min) + xb[0,0] + \
num_min
pk1_i, pk2_i = 1.0, 1.0
while (pk1_i+pk2_i >= 1.0):
pk1_i = np.random.rand() * (xb[1,1] - xb[1,0] - 2*num_min) + \
xb[1,0] + num_min
pk2_i = np.random.rand() * (min(xb[2,1],1-pk1_i) - xb[2,0] - \
2*num_min) + xb[2,0] + num_min
mu1_i = np.random.rand() * (xb[3,1] - max(xb[3,0],2*mu3) - 2*num_min) + \
max(xb[3,0],2*mu3) + num_min
mu2_i = np.random.rand() * (min(xb[4,1],mu1_i) - max(xb[4,0],mu3) - \
2*num_min) + max(xb[4,0],mu3) + num_min
return np.array([Px_i,pk1_i,pk2_i,mu1_i,mu2_i])
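# Illustrative usage sketch for x0_rand: draw a random starting point inside example
# bounds (the xb values below are placeholders, not values taken from this script).
def _demo_x0_rand():
    xb_demo = np.array([[0.3, 0.8],   # Px
                        [0.6, 0.99],  # pk1
                        [0.0, 0.4],   # pk2
                        [0.3, 1.0],   # mu1
                        [0.1, 0.5]])  # mu2
    x0 = x0_rand(mu3=0.0, xb=xb_demo, num_min=1.0e-12)
    print("Px, pk1, pk2, mu1, mu2 =", x0)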
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def check_constraints(Px,pk1,pk2,mu1,mu2,mu3):
"""
Check that the parameters are within the bounds and constraints of the
asymmetric BB84 protocol with weak coherent pulses with 2 'decoy' states.
Stops the script if any bounds or constraints are violated.
Parameters
----------
Px : float
Asymmetric polarisation probability.
pk1 : float
Probability Alice sends pulse intensity 1.
pk2 : float
Probability Alice sends pulse intensity 2.
mu1 : float
Intensity of pulse 1.
mu2 : float
Intensity of pulse 2.
mu3 : float
Intensity of pulse 3.
Returns
-------
None.
"""
# Constraint 1: Check polarisation basis probabilities are valid.
if (Px >= 1.0 or Px <= 0.0):
print("Error! Constraint 1 < Px < 0: ", Px)
exit(1)
# Constraint 2: Check probability of pulse with intensity 1 is in bounds.
if (pk1 >= 1.0 or pk1 <= 0.0):
print("Error! Constraint 1 < pk1 < 0: ", pk1)
exit(1)
# Constraint 3: Check probability of pulse with intensity 2 is in bounds.
if (pk2 >= 1.0 or pk2 <= 0.0):
print("Error! Constraint 1 < pk2 < 0: ", pk2)
exit(1)
# Constraint 4: Check sum of probabilities for intensity 1 & 2 are less
# than unity.
if ((pk1 + pk2) >= 1.0):
print("Error! Constraint (pk1 + pk2) < 1: ", pk1 + pk2)
exit(1)
# Constraint 5: Check value of intensity 1 is in bounds.
if (mu1 >= 1.0 or mu1 <= 0.0):
print("Error! Constraint 1 < mu1 < 0: ", mu1)
exit(1)
# Constraint 6: Check value of intensity 2 is in bounds.
if (mu2 >= 1.0 or mu2 <= 0.0):
print("Error! Constraint 1 < mu2 < 0: ", mu2)
exit(1)
    # Constraint 7: Check the intensities satisfy (mu1 - mu3) > mu2.
if ((mu1 - mu3) <= mu2):
print("Error! Constraint (mu1-mu3) > mu2: ", (mu1-mu3), mu2)
exit(1)
    # Constraint 8: Check intensity 2 is strictly greater than intensity 3 (mu2 > mu3).
if (mu2 <= mu3):
print("Error! Constraint mu2 > mu3: ", mu2, mu3)
exit(1)
return None
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def bool_constraints(Px,pk1,pk2,mu1,mu2,mu3):
"""
Check that the parameters are within the bounds and constraints of the
asymmetric BB84 protocol with weak coherent pulses with 2 'decoy' states.
Returns a boolean array corresponding to each of the constraints.
Parameters
----------
Px : float
Asymmetric polarisation probability.
pk1 : float
Probability Alice sends pulse intensity 1.
pk2 : float
Probability Alice sends pulse intensity 2.
mu1 : float
Intensity of pulse 1.
mu2 : float
Intensity of pulse 2.
mu3 : float
Intensity of pulse 3.
Returns
-------
C : boolean, array-like.
Do the parameters satisfy the constraints? True or False
"""
C = np.array([1,1,1,1,1,1,1,1], dtype=bool) # Initialise array as True
# Constraint 1: Check polarisation basis probabilities are valid.
if (Px >= 1.0 or Px <= 0.0):
C[0] = False
| |
#Daily amount of distance
Index('idx_global_statistics_time', 'time'),)
# log activity from both mobile and web clients
# Note: Web client doesn't have a device_id: Using the latest one for the user
global client_log_table
client_log_table = Table('client_log', metadata,
Column('id', Integer, primary_key=True),
Column('device_id', Integer, ForeignKey('devices.id'), nullable=False),
Column('user_id', Integer, ForeignKey('users.id')),
Column('time', TIMESTAMP(timezone=True), nullable=False, default=func.current_timestamp(),
server_default=func.current_timestamp()),
Column('function',
Enum("MOBILE-REGISTER", "MOBILE-AUTHENTICATE", "MOBILE-PATH",
"MOBILE-DESTINATIONS", "MOBILE-DEST-HISTORY", "MOBILE-CERTIFICATE",
"MOBILE-SHARE-CERTIFICATE", "MOBILE-PATH-EDIT", "MOBILE-FCM-TOKEN",
"WEB-CONNECT", "WEB-PATH", "WEB-CERTIFICATE", "WEB-TRIP-COMPARISON", "WEB-DEST-HISTORY",
"CANCEL-PARTICIPATION", "WEB-PATH-EDIT", "WEB-TRIPS-LIST", "WEB-DOWNLOAD-DATA",
name="client_function_enum")),
Column('info', String),
Index('idx_client_log_time', 'time'))
# Sample line to add new enum values to client_function_enum:
# ALTER TYPE client_function_enum ADD VALUE 'MOBILE-FCM-TOKEN' ;
# A table for the database version number
# used for upgrades
global migrate_version_table
migrate_version_table = Table('migrate_version', metadata,
Column('id', Integer, primary_key=True),
Column('version', Integer, nullable=False))
metadata.create_all(checkfirst=True)
# Database upgrade operations
code_db_version = 1
db_version = get_database_version()
if db_version is None:
db_version = init_database_version(code_db_version)
db_upgraded = False
if db_version < 1:
db_upgraded = True
db.engine.execute(text('ALTER TABLE mass_transit_data ADD COLUMN direction integer;'))
if db_upgraded:
upgrade_database_version(code_db_version)
conn = db.engine.connect().execution_options(isolation_level="AUTOCOMMIT")
conn.execute("""ALTER TYPE client_function_enum
ADD VALUE IF NOT EXISTS 'WEB-PATH-EDIT'""")
conn.execute("""ALTER TYPE client_function_enum
ADD VALUE IF NOT EXISTS 'WEB-TRIPS-LIST'""")
conn.execute("""ALTER TYPE client_function_enum
ADD VALUE IF NOT EXISTS 'WEB-DOWNLOAD-DATA'""")
conn.close()
# Combined view of legs with migrated/detected/user modes and lines
create = text("""
CREATE OR REPLACE VIEW leg_modes AS SELECT
l.id,
l.device_id,
l.user_id,
l.time_start,
l.time_end,
l.coordinate_start,
l.coordinate_end,
l.activity activity_device, -- rename in favor of user-corrected activity
l.cluster_start,
l.cluster_end,
l.km,
l.trip,
mu.mode mode_user, mu.line line_user,
ml.mode mode_live, ml.line line_live,
mp.mode mode_planner, mp.line line_planner,
mf.mode mode_filtered, mf.line line_filtered,
-- combined transit mode
coalesce(mu.mode, ml.mode, mp.mode, mf.mode, activity::text::mode_enum)
AS mode,
-- strict activity including user correction, no mass transit
CASE WHEN mu.mode IN :activity_types THEN mu.mode::text::activity_type_enum
WHEN mu.mode IN :mass_transit_types THEN 'IN_VEHICLE'
ELSE activity
END activity,
-- strict mass transit line_type, no activity
CASE WHEN mu.mode IN :mass_transit_types THEN mu.mode
WHEN mu.mode IS NULL THEN coalesce(ml.mode, mp.mode, mf.mode)
END line_type,
-- collateral coalesce of line on mode
CASE WHEN mu.mode IS NOT NULL THEN mu.line
WHEN ml.mode IS NOT NULL THEN ml.line
WHEN mp.mode IS NOT NULL THEN mp.line
WHEN mf.mode IS NOT NULL THEN mf.line END line_name
FROM legs l
LEFT JOIN modes mu ON mu.leg = l.id AND mu.source = 'USER'
LEFT JOIN modes ml ON ml.leg = l.id AND ml.source = 'LIVE'
LEFT JOIN modes mp ON mp.leg = l.id AND mp.source = 'PLANNER'
LEFT JOIN modes mf ON mf.leg = l.id AND mf.source = 'FILTERED'
""")
params = {
"activity_types": activity_types,
"mass_transit_types": mass_transit_types}
try:
with db.engine.begin() as t:
t.execute(create, **params)
except ProgrammingError as e:
# REPLACE VIEW is rather conservative; replace harder...
with db.engine.begin() as t:
t.execute(text("DROP VIEW leg_modes"))
t.execute(create, **params)
global leg_modes_view
leg_modes_view = Table("leg_modes", metadata,
Column("device_id", ForeignKey(devices_table.c.id)),
Column("user_id", ForeignKey(users_table.c.id)),
Column("trip", ForeignKey(trips_table.c.id)),
autoload=True)
# Functions and triggers that maintain the leg_ends table
with open("sql/legends.sql") as f, db.engine.begin() as t:
# Server restarts can result in this being run concurrently, leading to
# tuple concurrently updated and other racing on function and trigger
# updates. Lock something vaguely relevant to serialize access.
t.execute(text("lock leg_ends in access exclusive mode"))
t.execute(text(f.read()), clustdist=2*DEST_RADIUS_MAX)
# Functions and triggers that maintain the places table
with open("sql/places.sql") as f, db.engine.begin() as t:
t.execute(text("lock places in access exclusive mode"))
t.execute(text(f.read()), clustdist=2*DEST_RADIUS_MAX)
return db, store
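# Illustrative sketch of querying the leg_modes view defined above (assumes the module
# global `db` set up by the function above): the `mode` column already coalesces the
# USER/LIVE/PLANNER/FILTERED sources for each leg.
def _demo_leg_modes(user_id):
    query = text("""
        SELECT id, time_start, time_end, mode, line_name
        FROM leg_modes
        WHERE user_id = :user
        ORDER BY time_start DESC LIMIT 10""")
    return db.engine.execute(query, user=user_id).fetchall()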
# Helper Functions:
def get_rating(user_id, start_date, end_date):
query = '''SELECT * FROM travelled_distances
WHERE time >= :start_date
AND time < :end_date
AND user_id = :user_id;
'''
distance_rows = db.engine.execute(text(query), start_date=start_date, end_date=end_date, user_id=user_id)
rows = distance_rows.fetchall()
ranking = 0
if len(rows) == 0:
return EnergyRating(user_id), ranking
rating = EnergyRating(user_id)
for row in rows:
rating.add_travelled_distances_row(row)
ranking = row["ranking"]
rating.calculate_rating()
return rating, ranking
def get_svg(user_id, firstday=None, lastday=None):
if lastday is None:
if firstday is None:
# No params, end on last summarized user day, typically yesterday
query = text('SELECT max(time) FROM travelled_distances')
lastday = db.engine.execute(query).scalar()
else:
# Only start specified, show seven day window
lastday = firstday + timedelta(days=6)
if lastday is None:
# No data or params, show seven day window up through yesterday
lastday = datetime.datetime.now().replace(
hour=0, minute=0, second=0, microsecond=0) - timedelta(days=2)
if firstday is None:
# Default seven day window back
firstday = lastday - timedelta(days=6)
end_time = lastday + timedelta(days=1)
rating, ranking = get_rating(user_id, firstday, end_time)
# get_rating returns stored 7 day ranking regardless of window length;
# calculate true ranking in window
query = text("""
WITH totals AS (SELECT
user_id,
sum(total_distance * average_co2) / sum(total_distance) co2
FROM travelled_distances
WHERE time >= :firstday AND time <= :lastday
GROUP BY user_id ORDER BY co2),
ranked AS (SELECT *, rank() OVER (order by co2) FROM totals),
maxed AS (SELECT *, last_value(rank) OVER () FROM ranked)
SELECT rank, last_value FROM maxed WHERE user_id = :user""")
ranks = db.engine.execute(
query, firstday=firstday, lastday=lastday, user=user_id).first()
ranking, max_ranking = ranks or (0, 0)
return generate_energy_rating_svg(
rating, firstday, end_time, ranking, max_ranking)
def update_user_distances(user, start, end, update_only=True):
"""Update travelled_distances for given user, based on changes to data
between given start and end. If update_only, disallow writing stats on a
new day, do update global stats."""
# Snap to whole days
start = start.replace(hour=0, minute=0, second=0, microsecond=0)
end += timedelta(days=1, microseconds=-1)
end = end.replace(hour=0, minute=0, second=0, microsecond=0)
data_rows = get_filtered_device_data_points(user, start, end)
# discard suspiciously sharp movement from bogus location jumps
data_rows = trace_discard_sidesteps(data_rows, BAD_LOCATION_RADIUS)
dists = db.metadata.tables["travelled_distances"]
for rating in get_ratings_from_rows(data_rows, user):
where = and_(*(dists.c[x] == rating[x] for x in ["user_id", "time"]))
ex = db.engine.execute(dists.select(where)).first() # no upsert yet
if ex:
db.engine.execute(dists.update(where, rating))
elif not update_only:
# Refrain from writing partial stats for today that daily batch
# then wouldn't update
db.engine.execute(dists.insert([rating]))
# Batch updates may want to defer generating derived sums and rankings
if not update_only:
return
# Update unused weekly rankings based on ratings
query = text("""
SELECT DISTINCT time FROM travelled_distances
WHERE time >= :start AND time < :end + interval '6 days'
AND total_distance IS NOT NULL""")
for row in db.engine.execute(query, start=start, end=end):
generate_rankings(row[0])
# Update unused global distance, co2 average, active users in last 13 days
update_global_statistics(start, end)
def get_ratings_from_rows(filtered_data_rows, user_id):
ratings = []
rows = list(filtered_data_rows)
if len(rows) == 0:
return ratings
previous_time = rows[0]["time"]
current_date = rows[0]["time"].replace(hour = 0, minute = 0, second = 0, microsecond = 0)
previous_location = json.loads(rows[0]["geojson"])["coordinates"]
rating = EnergyRating(user_id, date=current_date)
for row in rows[1:]:
current_activity = row["activity"]
current_time = row["time"]
current_location = json.loads(row["geojson"])["coordinates"]
if (current_time - current_date).total_seconds() >= 60*60*24: #A full day
current_date = current_time.replace(hour = 0, minute = 0, second = 0, microsecond = 0)
rating.calculate_rating()
if not rating.is_empty():
ratings.append(rating.get_data_dict())
rating = EnergyRating(user_id, date=current_date)
if (current_time - previous_time).total_seconds() > MAX_POINT_TIME_DIFFERENCE:
previous_time = current_time
previous_location = current_location
continue
distance = get_distance_between_coordinates(previous_location, current_location) / 1000.0
previous_location = current_location
if current_activity == "IN_VEHICLE":
#TODO: handle FERRY somehow.
if row["line_type"] == "TRAIN":
rating.add_in_mass_transit_A_distance(distance)
elif row["line_type"] in ("TRAM", "SUBWAY"):
rating.add_in_mass_transit_B_distance(distance)
elif row["line_type"] == "BUS":
rating.add_in_mass_transit_C_distance(distance)
else:
rating.add_in_vehicle_distance(distance)
elif current_activity == "ON_BICYCLE":
rating.add_on_bicycle_distance(distance)
elif current_activity == "RUNNING":
rating.add_running_distance(distance)
elif current_activity == "WALKING":
rating.add_walking_distance(distance)
rating.calculate_rating()
if not rating.is_empty():
ratings.append(rating.get_data_dict())
return ratings
def generate_rankings(time):
time_end = time + timedelta(days=1)
time_start = time_end - timedelta(days=7)
query = '''
SELECT user_id, total_distance, average_co2
FROM travelled_distances
WHERE time < :time_end
AND time >= :time_start
'''
travelled_distances_rows = db.engine.execute(text(query), time_start=time_start, time_end=time_end)
total_distances = {}
total_co2 = {}
totals = []
for row in travelled_distances_rows:
if row["total_distance"] is not None:
if row["user_id"] in total_distances:
total_distances[row["user_id"]] += row["total_distance"]
total_co2[row["user_id"]] += row["average_co2"] * row["total_distance"]
else:
total_distances[row["user_id"]] = row["total_distance"]
total_co2[row["user_id"]] = row["average_co2"] * row["total_distance"]
for user_id in total_distances:
totals.append((user_id, total_co2[user_id] / total_distances[user_id]))
totals_sorted = sorted(totals, key=lambda average_co2: average_co2[1])
batch = [
{ "user_id": totals_sorted[i][0],
"time": time,
"ranking": i + 1 }
for i in range(len(totals_sorted))]
if batch:
db.engine.execute(travelled_distances_table.insert(batch))
def update_global_statistics(time_start, the_end):
while (time_start < the_end):
time_end = time_start + timedelta(days=1)
query = '''
SELECT total_distance, average_co2
FROM travelled_distances
WHERE time >= :time_start
AND time < :time_end
'''
travelled_distances_rows = db.engine.execute(text(query), time_start=time_start, time_end=time_end)
new = get_global_statistics_for_day(travelled_distances_rows, time_start)
time_start += timedelta(days=1)
# Manual upsert for older db
where = global_statistics_table.c.time == new["time"]
ex = db.engine.execute(global_statistics_table.select(where)).first()
if ex:
| |
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif rrint + data.Ponset[n*12] < 1200 and (data.Toffset[n*12]-rrint) < 0:
temp = df.iloc[:,x][int(n*1200):int(data.Ponset[n*12]+(n*1200))]
test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
temp = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)-rrint):int(data.Ponset[n*12]+(n*1200))]
test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
unfiltered_leads = df.copy()
for n in range(count1000):
for inx in range(12):
test = df_fixer(df.iloc[:,inx][n*1200:(n+1)*1200], n)
gaps = []
lstOfNs = []
gap = []
for num in test[test.isna() == True].index:
lstOfNs.append(num)
if len(lstOfNs) == 1:
gap.append(lstOfNs[0])
if len(lstOfNs) > 1:
if lstOfNs[-1] - lstOfNs[-2] < 5:
gap.append(num)
elif lstOfNs[-1] - lstOfNs[-2] > 5:
gaps.append(gap)
gap = []
gap.append(num)
gaps.append(gap)
if gaps != [[]]:
x = []
y = []
for g in gaps:
if len(g) == 1:
x.append([g[-1]+1])
y.append(test[g[-1]+1])
if np.isnan(test.iloc[0]):
point1 = [g[0], test[g[-1]+1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
else:
point1 = [g[0]-1, test[g[0]-1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
for i in range(len(x)):
test[x[i]] = y[i]
if (trapz(abs(test[int(data.Qonset[n*12]):int(data.Qoffset[n*12])]))/trapz(abs(df.iloc[:,inx][int(data.Qonset[12*n]+(1200*n)):int(data.Qoffset[12*n]+(1200*n))]))) < .60:
test = df.iloc[:,inx][n*1200:(n+1)*1200]
test = medfilt(test, kernel_size=9)
df.iloc[:,inx][n*1200:(n+1)*1200] = test
del gaps
del lstOfNs
del gap
del test
VTI_leads = df[['III', 'aVF', 'aVL', 'aVR']]
df = df[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
Unfiltered_VTI_leads = unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']]
unfiltered_leads = unfiltered_leads[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
matrix = [[.38, -.07, -.13, .05, -.01, .14, .06, .54],
[-.07, .93, .06, -.02, -.05, .06, -.17, .13],
[.11, -.23, -.43, -.06, -.14, -.20, -.11, .31]]
x = matrix[0]
y = matrix[1]
z = matrix[2]
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(df)):
xtemp.append((df.iloc[n].values * x).sum())
ytemp.append((df.iloc[n].values * y).sum())
ztemp.append((df.iloc[n].values * z).sum())
n+=1
df['x'] = xtemp
df['y'] = ytemp
df['z'] = ztemp
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(unfiltered_leads)):
xtemp.append((unfiltered_leads.iloc[n].values * x).sum())
ytemp.append((unfiltered_leads.iloc[n].values * y).sum())
ztemp.append((unfiltered_leads.iloc[n].values * z).sum())
n+=1
df['Unfiltered_x'] = xtemp
df['Unfiltered_y'] = ytemp
df['Unfiltered_z'] = ztemp
del xtemp
del ytemp
del ztemp
df['Date'] = data['Date']
df['ID'] = data['ID']
df['Time'] = data['Time']
df['Print'] = data['Print']
df['Ponset'] = data['Ponset']
df['Pdur'] = data['Pdur']
df['Poffset'] = data['Poffset']
df['Qonset'] = data['Qonset']
df['Qrsdur'] = data['Qrsdur']
df['Qtint'] = data['Qtint']
df['Qoffset'] = data['Qoffset']
df['Tonset'] = data['Tonset']
df['Tdur'] = data['Tdur']
df['Toffset'] = data['Toffset']
df['HeartRate'] = data['HeartRate']
df['QRSFrontAxis'] = data['QRSFrontAxis']
df['Sex'] = data['Sex']
df['QTC'] = data['QTC']
df['Age'] = data['Age']
df['Name'] = data['Name']
for n in range(count1000):
df['Ponset'][(n*1200):(n+1)*1200] = data['Ponset'][n*12]
df['Print'][(n*1200):(n+1)*1200] = data['Print'][n*12]
df['Pdur'][(n*1200):(n+1)*1200] = data['Pdur'][n*12]
df['Poffset'][(n*1200):(n+1)*1200] = data['Poffset'][n*12]
df['Qonset'][(n*1200):(n+1)*1200] = data['Qonset'][n*12]
df['Qrsdur'][(n*1200):(n+1)*1200] = data['Qrsdur'][n*12]
df['Qtint'][(n*1200):(n+1)*1200] = data['Qtint'][n*12]
df['Qoffset'][(n*1200):(n+1)*1200] = data['Qoffset'][n*12]
df['Tonset'][(n*1200):(n+1)*1200] = data['Tonset'][n*12]
df['Tdur'][(n*1200):(n+1)*1200] = data['Tdur'][n*12]
df['Toffset'][(n*1200):(n+1)*1200] = data['Toffset'][n*12]
df['HeartRate'][(n*1200):(n+1)*1200] = data['HeartRate'][n*12]
df['QRSFrontAxis'][(n*1200):(n+1)*1200] = data['QRSFrontAxis'][n*12]
df['Sex'][(n*1200):(n+1)*1200] = data['Sex'][n*12]
df['QTC'][(n*1200):(n+1)*1200] = data['QTC'][n*12]
df['Age'][(n*1200):(n+1)*1200] = data['Age'][n*12]
df['Date'][(n*1200):(n+1)*1200] = data['Date'][12*n]
df['Time'][(n*1200):(n+1)*1200] = data['Time'][12*n]
df['ID'][(n*1200):(n+1)*1200] = data['ID'][12*n]
df['Name'][(n*1200):(n+1)*1200] = data['Name'][12*n]
df[['III', 'aVF', 'aVL', 'aVR']] = VTI_leads
unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']] = Unfiltered_VTI_leads
df[['Unfiltered_I', 'Unfiltered_II', 'Unfiltered_III', 'Unfiltered_V1', 'Unfiltered_V2', 'Unfiltered_V3', 'Unfiltered_V4', 'Unfiltered_V5', 'Unfiltered_V6', 'Unfiltered_aVF', 'Unfiltered_aVL', 'Unfiltered_aVR']] = unfiltered_leads[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVF', 'aVL', 'aVR']]
del unfiltered_leads
del VTI_leads
if len(half_data) > 0:
array = np.unique(half_data[half_data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_half_data = half_data.loc[half_data['ID'].isin(array) & half_data['Date'].isin(array) & half_data['Time'].isin(array)]
half_data.drop(missing_half_data.index, axis=0,inplace=True)
missing_half_data = missing_half_data.reset_index(drop=True)
del Tag
del Tags
half_data = half_data.reset_index(drop=True)
for n in range(count500):
half_data.Tonset[n*12:(n+1)*12] = np.repeat(int(half_data.Tonset[n*12:(n+1)*12].sum()/12), 12)
half_data.Pdur[n*12:(n+1)*12] = np.repeat(int(half_data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(half_data.Waveform)):
t = base64.b64decode(half_data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
half_df = pd.DataFrame(a)
half_df.insert(0, 'Lead', half_data['Lead'])
blank = []
for n in range(count500):
blank.append(pd.pivot_table(half_df[(n*12):(n+1)*12], columns=half_df.Lead))
test = pd.concat(blank)
new = []
array = []
for n in range(13):
for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
if num > 128:
new.append(index - (256 * (256 - num)))
elif num < 128:
new.append(index + (256 * num))
elif num == 0:
new.append(index)
else:
new.append(index)
new = []
array.append(new)
array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
half_df = pd.DataFrame(array)
half_df = pd.pivot_table(half_df, columns=test.columns)
half_df = half_df.fillna(0)
blank = []
for n in range(count500):
blank.append(half_df[(n*1200):((n+1)*1200)-600])
test = pd.concat(blank)
half_df = test
half_df = half_df.reset_index(drop=True)
half_df = pd.pivot_table(half_df, columns=half_df.index)
array = []
for i in range(count500):
for x in range(12):
temp = []
new = []
for n in half_df.iloc[x,i*600:(i+1)*600]:
temp.append(n)
if len(temp) > 1:
new.append(temp[-2])
if len(temp) < 601 and len(temp) > 1:
new.append((temp[-1]+temp[-2])/2)
if len(temp) == 600:
new.append(temp[-1])
new.append(temp[-1])
array.append(new)
I = (np.asarray(array[::12])).reshape(count500*1200)
II = (np.asarray(array[1::12])).reshape(count500*1200)
III = (np.asarray(array[2::12])).reshape(count500*1200)
V1 = (np.asarray(array[3::12])).reshape(count500*1200)
V2 = (np.asarray(array[4::12])).reshape(count500*1200)
V3 = (np.asarray(array[5::12])).reshape(count500*1200)
V4 = (np.asarray(array[6::12])).reshape(count500*1200)
V5 = (np.asarray(array[7::12])).reshape(count500*1200)
V6 = (np.asarray(array[8::12])).reshape(count500*1200)
aVF = (np.asarray(array[9::12])).reshape(count500*1200)
aVL = (np.asarray(array[10::12])).reshape(count500*1200)
aVR = (np.asarray(array[11::12])).reshape(count500*1200)
half_df = pd.pivot_table(pd.DataFrame([I, II, III, V1, V2, V3, V4, V5, V6, aVF, aVL, aVR]), columns=test.columns)
half_df = half_df.fillna(0)
del I
del II
del III
del V1
del V2
del V3
del V4
del V5
del V6
del aVF
del aVL
del aVR
del a
del p
del o
del t
del blank
del new
del array
del temp
for n in range(count500):
for x in range(12):
if ((half_data.Toffset[n*12]-half_data.RRint[n*12]) >= half_data.Ponset[n*12]) or ((half_data.Ponset[n*12] + half_data.RRint[n*12]) - half_data.Toffset[n*12] == 1):
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
rrint = half_data.RRint[n*12]
if (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) > 0:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif rrint + half_data.Ponset[n*12] < 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
for x in range(12):
half_df.iloc[:,x] = half_df.iloc[:,x]*2.5
unfiltered_half_leads = half_df.copy()
for n in range(count500):
for inx in range(12):
test = half_df_fixer(half_df.iloc[:,inx][n*1200:(n+1)*1200], n)
gaps = []
lstOfNs = []
gap = []
for num in test[test.isna() == True].index:
lstOfNs.append(num)
if len(lstOfNs) == 1:
gap.append(lstOfNs[0])
if len(lstOfNs) > 1:
if lstOfNs[-1] - lstOfNs[-2] < 5:
gap.append(num)
elif lstOfNs[-1] - lstOfNs[-2] > 5:
gaps.append(gap)
gap = []
gap.append(num)
gaps.append(gap)
if gaps != [[]]:
x = []
y = []
for g in gaps:
if len(g) == 1:
x.append([g[-1]+1])
y.append(test[g[-1]+1])
if np.isnan(test.iloc[0]):
point1 = [g[0], test[g[-1]+1]]
| |
# -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © <NAME>
# contact: <EMAIL>
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import settings
import cgi
import gevent
from datetime import datetime, timedelta
from B2SProtocol.constants import RESP_RESULT
from B2SUtils import db_utils
from B2SUtils.common import parse_ts
from B2SUtils.errors import ValidationError
from common.constants import TICKET_FEEDBACK
from common.constants import TICKET_PRIORITY
from common.utils import get_user_info
from common.utils import push_ticket_event
from webservice.base import BaseJsonResource
class BaseTicketPostResource(BaseJsonResource):
def _on_post(self, req, resp, conn, **kwargs):
values = self._get_valid_params()
ticket_id = self._save_ticket(conn, values)
if values['parent_id'] == '0':
self._update_ticket(conn, ticket_id)
else:
self._update_parent_ticket(conn, values['parent_id'])
gevent.spawn(push_ticket_event,
email=self.get_user_email(values.get('fo_author')
or values.get('fo_recipient')),
service_email=settings.SERVICE_EMAIL,
id_brand=values['id_brand'])
return {"res": RESP_RESULT.S,
"err": "",
"id": ticket_id}
def _save_ticket(self, conn, values):
ticket_id = db_utils.insert(conn, "ticket", values=values, returning='id')[0]
# attachment
form_params = cgi.parse_qs(self.request.query_string)
for _id in form_params.get('attachment', []):
db_utils.update(conn, "ticket_attachment",
values={'id_ticket': ticket_id},
where={'id': _id})
return ticket_id
def _update_ticket(self, conn, ticket_id):
db_utils.update(conn, "ticket",
values={'thread_id': ticket_id},
where={'id': ticket_id})
def _update_parent_ticket(self, conn, parent_id):
db_utils.update(conn, "ticket",
values={'replied': True, 'locked': False},
where={'id': parent_id})
def _get_valid_params(self):
values = {'priority': TICKET_PRIORITY.NORMAL,
'id_brand': self.request.get_param('id_brand'),
'created': datetime.utcnow()}
self._check_subject(values)
self._check_message(values)
self._check_author(values)
self._check_parent_id(values)
self._check_id_order(values)
self._check_id_shipment(values)
return values
def _check_subject(self, values):
values['subject'] = self._check_param_existing('subject')
def _check_message(self, values):
values['message'] = self._check_param_existing('message')
def _check_author(self, values):
pass
def _check_parent_id(self, values):
parent_id = self.request.get_param('parent_id')
if parent_id and parent_id != '0':
row = self._check_param_db_existing('parent_id', 'ticket',
("id", "id_brand", "fo_author", "bo_author", "thread_id"))
values['parent_id'] = row[0]
values['id_brand'] = row[1]
values['fo_recipient'] = row[2]
values['bo_recipient'] = row[3]
values['thread_id'] = row[4]
else:
values['parent_id'] = '0'
def _check_id_order(self, values):
id_order = self.request.get_param('id_order')
if id_order:
row = self._check_param_db_existing('id_order', 'orders')
values['id_order'] = row[0]
def _check_id_shipment(self, values):
id_shipment = self.request.get_param('id_shipment')
if id_shipment:
row = self._check_param_db_existing('id_shipment', 'shipments')
values['id_shipment'] = row[0]
def _check_param_existing(self, fname):
fvalue = self.request.get_param(fname)
if not fvalue:
raise ValidationError('INVALID_PARAM_%s' % fname.upper())
return fvalue
def _check_param_db_existing(self, fname, table, fields=None):
if not fields:
fields = ("id",)
fvalue = self.request.get_param(fname)
result = db_utils.select(self.conn, table,
columns=fields,
where={'id': fvalue})
if len(result) == 0:
raise ValidationError('INVALID_PARAM_%s' % fname.upper())
return result[0]
def get_user_email(self, id_user):
row = db_utils.select(self.conn, 'users',
columns=('email',),
where={'id': id_user})[0]
return row[0]
class TicketPostResource(BaseTicketPostResource):
encrypt = True
def _check_author(self, values):
values['bo_author'] = self._check_param_existing('author')
class TicketPost4FUserResource(BaseTicketPostResource):
login_required = {'get': True, 'post': True}
def _check_author(self, values):
values['fo_author'] = self.users_id
class BaseTicketListResource(BaseJsonResource):
def _on_get(self, req, resp, conn, **kwargs):
sql = """select id from ticket
where exists (
select 1 from ticket as inner_ticket
where inner_ticket.thread_id = ticket.id
%(filter_sql)s
)
%(sort_sql)s %(page_sql)s
"""
params = []
filter_sql = []
self._filter(req, filter_sql, params)
sort_sql = []
self._sort(req, sort_sql, params)
page_sql = []
self._paginate(req, page_sql, params)
threads = db_utils.query(self.conn,
sql % {'filter_sql': ''.join(filter_sql),
'sort_sql': ''.join(sort_sql),
'page_sql': ''.join(page_sql)},
params)
thread_ids = [t[0] for t in threads]
return self.get_threads_details(thread_ids)
def get_threads_details(self, thread_ids):
if not thread_ids:
return []
columns = ("id", "thread_id", "subject", "message",
"priority", "feedback", "id_order", "id_shipment",
"fo_author", "bo_author", "created")
sql = ("select %s from ticket"
" where thread_id in (%s)"
" order by thread_id, created"
% (",".join(columns),
",".join(map(str, thread_ids))))
rows = db_utils.query(self.conn, sql)
thread_dict = {}
cached_user_dict = {}
for row in rows:
row_dict = dict(zip(columns, row))
row_dict['attachments'] = self.get_attachments_info(row_dict['id'])
if row_dict['fo_author']:
if row_dict['fo_author'] not in cached_user_dict:
cached_user_dict[row_dict['fo_author']] = \
get_user_info(self.conn, row_dict['fo_author'])
extra_user_info = cached_user_dict[row_dict['fo_author']]
row_dict.update(extra_user_info)
if row_dict['thread_id'] not in thread_dict:
thread_dict[row_dict['thread_id']] = []
thread_dict[row_dict['thread_id']].append(row_dict)
return [thread_dict.get(thread_id, []) for thread_id in thread_ids]
def get_attachments_info(self, ticket_id):
rows = db_utils.select(self.conn, 'ticket_attachment',
columns=('id', ),
                               where={'id_ticket': ticket_id})
return [{'id': r[0]} for r in rows]
def _filter(self, req, sql, params):
pass
def _sort(self, req, sql, params):
sort = req.get_param('sort') or '-time'
if sort:
if sort[1:] == 'time':
sql.append(" order by created ")
elif sort[1:] == 'prio':
sql.append(" order by priority ")
else:
raise ValidationError('INVALID_PARAM_SORT')
if sort[0] not in ('+', '-'):
raise ValidationError('INVALID_PARAM_SORT')
sql.append(("desc" if sort[0] == '-' else "asc"))
def _paginate(self, req, sql, params):
page = req.get_param('page') or '0'
limit = req.get_param('limit') or '10'
if not limit or not limit.isdigit() or not page or not page.isdigit():
raise ValidationError('INVALID_REQUEST')
offset = int(page) * int(limit)
limit = int(limit) + 1
sql.append(" limit %s offset %s")
params.append(limit)
params.append(offset)
def _filter_brand(self, req, sql, params):
id_brand = req.get_param('id_brand')
if id_brand:
sql.append(" and id_brand=%s")
params.append(id_brand)
def _filter_user(self, req, sql, params):
id_user = req.get_param('id_user')
if id_user:
sql.append(" and (fo_author=%s or fo_recipient=%s)")
            params.append(id_user)
            params.append(id_user)
def _filter_bo_user(self, req, sql, params):
id_bo_user = req.get_param('id_bo_user')
if id_bo_user:
sql.append(" and (bo_author=%s or bo_recipient=%s)")
            params.append(id_bo_user)
            params.append(id_bo_user)
def _filter_order(self, req, sql, params):
id_order = req.get_param('id_order')
if id_order:
sql.append(" and id_order=%s")
params.append(id_order)
def _filter_shipment(self, req, sql, params):
id_shipment = req.get_param('id_shipment')
if id_shipment:
sql.append(" and id_shipment=%s")
params.append(id_shipment)
def _filter_new(self, req, sql, params):
new = req.get_param('new') in ('true', 'True')
if new:
sql.append(" and parent_id is null")
def _filter_parent(self, req, sql, params):
parent_id = req.get_param('parent_id')
if parent_id:
sql.append(" and parent_id=%s")
params.append(parent_id)
def _filter_escalation(self, req, sql, params):
escalation = req.get_param('escalation') in ('True', 'true')
if escalation:
sql.append(" and escalation is True")
class TicketListResource(BaseTicketListResource):
encrypt = True
def _filter(self, req, sql, params):
self._filter_brand(req, sql, params)
self._filter_user(req, sql, params)
self._filter_bo_user(req, sql, params)
self._filter_order(req, sql, params)
self._filter_shipment(req, sql, params)
self._filter_new(req, sql, params)
self._filter_parent(req, sql, params)
self._filter_escalation(req, sql, params)
class TicketList4FUserResource(BaseTicketListResource):
login_required = {'get': True, 'post': True}
def _filter(self, req, sql, params):
self._filter_brand(req, sql, params)
self._filter_user(req, sql, params)
self._filter_order(req, sql, params)
self._filter_shipment(req, sql, params)
self._filter_parent(req, sql, params)
def _filter_user(self, req, sql, params):
sql.append(" and (fo_author=%s or fo_recipient=%s)")
params.append(self.users_id)
params.append(self.users_id)
class TicketRateResource(BaseJsonResource):
def _on_post(self, req, resp, conn, **kwargs):
ticket_id = req.get_param('ticket_id')
useful = req.get_param('useful') in ('True', 'true')
feedback = TICKET_FEEDBACK.USEFUL if useful else TICKET_FEEDBACK.USELESS
rows = db_utils.select(self.conn, 'ticket',
columns=('parent_id', 'fo_recipient'),
where={'id': ticket_id},
limit=1)
if rows and len(rows) == 1 \
and rows[0][0] != 0 and rows[0][1] is not None:
db_utils.update(conn, "ticket",
values={'feedback': feedback},
where={'id': ticket_id})
else:
raise ValidationError('INVALID_PARAM_TICKET_ID')
return {"res": RESP_RESULT.S,
"err": ""}
class TicketPriorityResource(BaseJsonResource):
encrypt = True
def _on_post(self, req, resp, conn, **kwargs):
ticket_id = req.get_param('ticket_id')
priority = req.get_param('priority')
escalation = req.get_param('escalation')
update_values = {}
if priority:
            if not priority.isdigit() or int(priority) not in TICKET_PRIORITY.toDict().values():
raise ValidationError('INVALID_PARAM_PRIORITY')
update_values.update({'priority': priority})
if escalation:
escalation = req.get_param('escalation') in ('True', 'true')
update_values.update({'escalation': escalation})
if escalation:
update_values.update({'escalation_time': datetime.utcnow()})
rows = db_utils.select(self.conn, 'ticket',
columns=('id',),
where={'id': ticket_id},
limit=1)
if not rows or len(rows) == 0:
raise ValidationError('INVALID_PARAM_TICKET_ID')
db_utils.update(conn, "ticket",
values=update_values,
where={'id': ticket_id})
return {"res": RESP_RESULT.S,
"err": ""}
class TicketLockResource(BaseJsonResource):
encrypt = True
def _on_post(self, req, resp, conn, **kwargs):
ticket_id = req.get_param('ticket_id')
rows = db_utils.select(self.conn, 'ticket',
columns=('locked', 'lock_time'),
where={'id': ticket_id},
limit=1)
if not rows or len(rows) == 0:
raise ValidationError('INVALID_PARAM_TICKET_ID')
row = rows[0]
if not row[0] or self._lock_expired(row[1]):
db_utils.update(conn, "ticket",
values={'locked': True,
'lock_time': datetime.utcnow()},
where={'id': ticket_id})
else:
raise ValidationError('TICKET_LOCKED')
return {"res": RESP_RESULT.S,
"err": ""}
def _lock_expired(self, lock_time):
return parse_ts(lock_time) + timedelta(seconds=15*60) \
< datetime.utcnow()
class TicketRelResource(BaseJsonResource):
encrypt = True
def _on_get(self, req, resp, conn, **kwargs):
id_order = req.get_param('id_order')
id_shipment = req.get_param('id_shipment')
sql = "select id from ticket where False"
params = []
if id_order:
sql += " or id_order=%s"
params.append(id_order)
if id_shipment:
| |
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 <EMAIL>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import ast
import base64
import datetime
import json
import queue
import sys
import time
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
from functools import wraps
from . import bitcoin
from . import util
from .address import Address, AddressError
from .bitcoin import hash_160, COIN, TYPE_ADDRESS
from .i18n import _
from .plugins import run_hook
from .wallet import create_new_wallet, restore_wallet_from_text
from .transaction import Transaction, multisig_script, OPReturn
from .util import bfh, bh2u, format_satoshis, json_decode, print_error, to_bytes
from .paymentrequest import PR_PAID, PR_UNCONFIRMED, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .simple_config import SimpleConfig
known_commands = {}
def fixoshis(amount):
# fixoshi conversion must not be performed by the parser
return int(COIN*PyDecimal(amount)) if amount not in ['!', None] else amount
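# For illustration (assuming COIN == 100000000, as in Electrum-derived code):
# fixoshis('0.001') -> 100000, while the sentinel values '!' and None are passed
# through unchanged so the caller can handle them.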
class Command:
def __init__(self, func, s):
self.name = func.__name__
self.requires_network = 'n' in s
self.requires_wallet = 'w' in s
self.requires_password = 'p' in s
self.description = func.__doc__
self.help = self.description.split('.')[0] if self.description else None
varnames = func.__code__.co_varnames[1:func.__code__.co_argcount]
self.defaults = func.__defaults__
if self.defaults:
n = len(self.defaults)
self.params = list(varnames[:-n])
self.options = list(varnames[-n:])
else:
self.params = list(varnames)
self.options = []
self.defaults = []
def __repr__(self):
return "<Command {}>".format(self)
def __str__(self):
return "{}({})".format(
self.name,
", ".join(self.params + ["{}={!r}".format(name, self.defaults[i])
for i, name in enumerate(self.options)]))
def command(s):
def decorator(func):
global known_commands
name = func.__name__
known_commands[name] = Command(func, s)
@wraps(func)
def func_wrapper(*args, **kwargs):
c = known_commands[func.__name__]
wallet = args[0].wallet
network = args[0].network
password = kwargs.get('password')
if c.requires_network and network is None:
raise BaseException("Daemon offline") # Same wording as in daemon.py.
if c.requires_wallet and wallet is None:
raise BaseException("Wallet not loaded. Use 'oregano daemon load_wallet'")
if c.requires_password and password is None and wallet.storage.get('use_encryption') \
and not kwargs.get("unsigned"):
return {'error': 'Password required' }
return func(*args, **kwargs)
return func_wrapper
return decorator
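# Usage note (illustrative): the flag string passed to @command encodes the
# requirements checked above -- 'n' needs a network connection, 'w' a loaded
# wallet, and 'p' a password when the wallet is encrypted -- so @command('wp')
# marks a wallet command that may require a password.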
class Commands:
def __init__(self, config, wallet, network, callback = None):
self.config = config
self.wallet = wallet
self.network = network
self._callback = callback
def _run(self, method, *args, password_getter=None, **kwargs):
# this wrapper is called from the python console
cmd = known_commands[method]
if cmd.requires_password and self.wallet.has_password():
password = password_getter()
if password is None:
return
else:
password = None
f = getattr(self, method)
if cmd.requires_password:
kwargs.update(password=password)
result = f(*args, **kwargs)
if self._callback:
self._callback()
return result
@staticmethod
def _EnsureDictNamedTuplesAreJSONSafe(d):
""" Address, ScriptOutput and other objects contain bytes. They cannot be serialized
using JSON. This makes sure they get serialized properly by calling .to_ui_string() on them.
See issue #638 """
def DoChk(v):
def ChkList(l):
for i in range(0,len(l)): l[i] = DoChk(l[i]) # recurse
return l
def EncodeNamedTupleObject(nt):
if hasattr(nt, 'to_ui_string'): return nt.to_ui_string()
return nt
if isinstance(v, tuple): v = EncodeNamedTupleObject(v)
elif isinstance(v, list): v = ChkList(v) # may recurse
elif isinstance(v, dict): v = Commands._EnsureDictNamedTuplesAreJSONSafe(v) # recurse
return v
for k in d.keys():
d[k] = DoChk(d[k])
return d
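# Sketch of the intended effect (hypothetical values): a dict such as
# {'address': Address(...), 'outputs': [ScriptOutput(...)]} comes back with each
# such object replaced by its .to_ui_string() text, recursing through nested
# lists and dicts, so JSON serialization of the result no longer fails.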
@command('')
def addressconvert(self, address):
"""Convert to/from Legacy <-> Cash Address. Address can be either
a legacy or a Cash Address and both forms will be returned as a JSON
dict."""
try:
addr = Address.from_string(address)
except Exception as e:
raise AddressError(f'Invalid address: {address}') from e
return {
'cashaddr' : addr.to_full_string(Address.FMT_CASHADDR),
'legacy' : addr.to_full_string(Address.FMT_LEGACY),
}
@command('')
def commands(self):
"""List of commands"""
return ' '.join(sorted(known_commands.keys()))
@command('')
def create(self, passphrase=<PASSWORD>, password=<PASSWORD>, encrypt_file=True, seed_type=None, wallet_path=None):
"""Create a new wallet.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
d = create_new_wallet(path=wallet_path,
passphrase=<PASSWORD>,
password=password,
encrypt_file=encrypt_file,
seed_type=seed_type,
config=self.config)
return {
'seed': d['seed'],
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('')
def restore(self, text, passphrase=<PASSWORD>, password=<PASSWORD>, encrypt_file=True, wallet_path=None):
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin cash addresses
or bitcoin cash private keys.
If you want to be prompted for an argument, type '?' or ':' (concealed)
"""
d = restore_wallet_from_text(text,
path=wallet_path,
passphrase=<PASSWORD>,
password=password,
encrypt_file=encrypt_file,
config=self.config)
return {
'path': d['wallet'].storage.path,
'msg': d['msg'],
}
@command('wp')
def password(self, password=None, new_password=None):
"""Change wallet password. """
b = self.wallet.storage.is_encrypted()
self.wallet.update_password(password, new_password, b)
self.wallet.storage.write()
return {'password':self.wallet.has_password()}
@command('w')
def get(self, key):
"""Return item from wallet storage"""
return self.wallet.storage.get(key)
@command('')
def getconfig(self, key):
"""Return a configuration variable. """
return self.config.get(key)
@classmethod
def _setconfig_normalize_value(cls, key, value):
if key not in ('rpcuser', 'rpcpassword'):
value = json_decode(value)
try:
value = ast.literal_eval(value)
except:
pass
return value
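# Illustration (hypothetical keys): setconfig('some_fee', '1000') stores the
# integer 1000 via ast.literal_eval, while setconfig('rpcpassword', '1000')
# keeps the raw string, since rpcuser/rpcpassword bypass the decoding above.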
@command('')
def setconfig(self, key, value):
"""Set a configuration variable. 'value' may be a string or a Python expression."""
value = self._setconfig_normalize_value(key, value)
self.config.set_key(key, value)
return True
@command('')
def make_electrum_seed(self, nbits=132, entropy=1, language=None):
"""Create an Electrum seed"""
from .mnemonic import Mnemonic_Electrum
t = 'electrum'
s = Mnemonic_Electrum(language).make_seed(t, nbits, custom_entropy=entropy)
return s
@command('')
def make_seed(self, nbits=128, language=None):
"""Create a BIP39 seed"""
from .mnemonic import Mnemonic
s = Mnemonic(language).make_seed(num_bits=nbits)
return s
@command('')
def check_electrum_seed(self, seed, entropy=1, language=None):
"""Check that an Electrum seed was generated with given entropy"""
from .mnemonic import Mnemonic_Electrum
return Mnemonic_Electrum(language).check_seed(seed, entropy)
@command('')
def check_seed(self, seed, entropy=1, language=None):
"""This command is deprecated and will fail, use check_electrum_seed instead. """
raise NotImplementedError('check_seed has been removed. Use check_electrum_seed instead.')
@command('n')
def getaddresshistory(self, address):
"""Return the transaction history of any address. Note: This is a
walletless server query, results are not checked by SPV.
"""
sh = Address.from_string(address).to_scripthash_hex()
return self.network.synchronous_get(('blockchain.scripthash.get_history', [sh]))
@command('w')
def listunspent(self):
"""List unspent outputs. Returns the list of unspent transaction
outputs in your wallet."""
l = self.wallet.get_utxos(exclude_frozen=False)
for i in l:
v = i["value"]
i["value"] = str(PyDecimal(v)/COIN) if v is not None else None
i["address"] = i["address"].to_ui_string()
return l
@command('n')
def getaddressunspent(self, address):
"""Returns the UTXO list of any address. Note: This
is a walletless server query, results are not checked by SPV.
"""
sh = Address.from_string(address).to_scripthash_hex()
return self.network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
@command('')
def serialize(self, jsontx):
"""Create a transaction from json inputs.
Inputs must have a redeemPubkey.
Outputs must be a list of {'address':address, 'value':fixoshi_amount}.
"""
keypairs = {}
inputs = jsontx.get('inputs')
outputs = jsontx.get('outputs')
locktime = jsontx.get('locktime', 0)
for txin in inputs:
if txin.get('output'):
prevout_hash, prevout_n = txin['output'].split(':')
txin['prevout_n'] = int(prevout_n)
txin['prevout_hash'] = prevout_hash
sec = txin.get('privkey')
if sec:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
keypairs[pubkey] = privkey, compressed
txin['type'] = txin_type
txin['x_pubkeys'] = [pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
outputs = [(TYPE_ADDRESS, Address.from_string(x['address']), int(x['value'])) for x in outputs]
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=self.wallet and self.wallet.is_schnorr_enabled())
tx.sign(keypairs)
return tx.as_dict()
@command('wp')
def signtransaction(self, tx, privkey=None, password=None):
"""Sign a transaction. The wallet keys will be used unless a private key is provided."""
tx = Transaction(tx, sign_schnorr=self.wallet and self.wallet.is_schnorr_enabled())
if privkey:
txin_type, privkey2, compressed = bitcoin.deserialize_privkey(privkey)
pubkey = bitcoin.public_key_from_private_key(privkey2, compressed)
tx.sign({pubkey:(privkey2, compressed)})
else:
self.wallet.sign_transaction(tx, password)
return tx.as_dict()
@command('')
def deserialize(self, tx):
"""Deserialize a serialized transaction"""
tx = Transaction(tx)
return self._EnsureDictNamedTuplesAreJSONSafe(tx.deserialize().copy())
@command('n')
def broadcast(self, tx):
"""Broadcast a transaction to the network. """
tx = Transaction(tx)
return self.network.broadcast_transaction(tx)
@command('')
def createmultisig(self, num, pubkeys):
"""Create multisig address"""
assert isinstance(pubkeys, list), (type(num), type(pubkeys))
redeem_script = multisig_script(pubkeys, num)
address = bitcoin.hash160_to_p2sh(hash_160(bfh(redeem_script)))
return {'address':address, 'redeemScript':redeem_script}
@command('w')
def freeze(self, address):
"""Freeze address. Freeze the funds at one of your wallet\'s addresses"""
address = Address.from_string(address)
return self.wallet.set_frozen_state([address], True)
@command('w')
def unfreeze(self, address):
"""Unfreeze address. Unfreeze the funds at one of your wallet\'s address"""
fc00::101/128
!
interface Loopback1
description Loopback1
ipv4 address 10.100.0.1 255.255.255.0
ipv4 address 10.100.1.1 255.255.255.0 secondary
ipv4 address 10.100.2.1 255.255.255.0 secondary
ipv6 address fc00:100::1/64
ipv6 address fc00:100::101/64
ipv6 address fc00:100::201/64
!
interface MgmtEth0/RP0/CPU0/0
description MgmtEth0/RP0/CPU0/0
cdp
vrf VRF-MGMT
ipv4 address 172.23.136.21 255.255.252.0
!
interface GigabitEthernet0/0/0/12
description GigabitEthernet0/0/0/12
mtu 9018
lldp
receive disable
transmit disable
!
negotiation auto
load-interval 30
l2transport
!
!
interface TenGigE0/0/0/4
description TenGigE0/0/0/4
bundle id 51 mode active
cdp
load-interval 30
!
interface TenGigE0/0/0/5
shutdown
!
interface TenGigE0/0/0/5.100 l2transport
description TenGigE0/0/0/5.100
!
interface TenGigE0/0/0/47
description TenGigE0/0/0/47
shutdown
mac-address 201.b19.1234
!
interface BVI101
cdp
description BVI101
ipv4 address 192.168.101.1 255.255.255.0
load-interval 30
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
description HundredGigE0/0/1/0
bundle id 10 mode active
cdp
load-interval 30
mac-address 200.b19.5678
!
interface preconfigure GigabitEthernet0/0/0/11
description GigabitEthernet0/0/0/11
shutdown
!
interface preconfigure GigabitEthernet0/0/0/16
description GigabitEthernet0/0/0/16
shutdown
!
interface preconfigure GigabitEthernet0/0/0/17
description GigabitEthernet0/0/0/17
shutdown
!
"""
template_original = """
<doc>
Template for capturing interface configuration data from IOS-XR devices
Note: In order to handle different interface appearances, the interface block has been replicated.
Be sure to update all blocks accordingly when adding any new values to capture.
</doc>
<macro>
## parses ipv4 addresses to determine which is primary and which are secondary
## and converts dotted-quad subnet mask into cidr format
def ipv4_macro(data):
data_list = list(data.split(" "))
addr = str(data_list[0])
mask = str(data_list[1])
mask = str(sum(bin(int(x)).count('1') for x in mask.split('.')))
ipv4 = addr+"/"+mask
if 'secondary' in data:
is_secondary = True
else:
is_secondary = False
result = { "ipv4" : ipv4, "is_secondary" : is_secondary }
return result
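## e.g. (illustrative) "10.100.0.1 255.255.255.0 secondary" -> {"ipv4": "10.100.0.1/24", "is_secondary": True}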
</macro>
## parent group for all interface groups
<group name="interfaces">
## matches primary interfaces
<group>
{{ mode | set(None) }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches pre-configured interfaces
<group>
{{ mode | set('preconfigure') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface preconfigure {{ interface }}
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
{{ mode | set('l2transport') }}
{{ description | set(None) }}
{{ speed | set(None) }}
{{ negotiation | set(None) }}
{{ disabled | set(False) }}
interface {{ interface }} l2transport
description {{ description | re(".+") }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | PHRASE | _exact_ | macro("ipv4_macro") }}
</group>
<group name="ipv6*" method="table" containsall="ipv6">
ipv6 address {{ ipv6 | PHRASE | _exact_ }}
</group>
speed {{ speed }}
negotiation {{ negotiation }}
shutdown {{ disabled | set(True) }}
mac-address {{ mac_address }}
</group>
</group>
"""
parser = ttp(data, template_original, log_level="error")
parser.parse()
res = parser.result()
pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data()
def test_interface_template_not_collecting_all_data_reduced():
"""
Below template and data were producing this result:
[[{'interfaces': [{'interface': 'TenGigE0/0/0/5.100'},
{'interface': 'BVI101',
'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]}]}]]
TTP was not collecting mac-address for BVI 101
"""
data = """
interface TenGigE0/0/0/5.100 l2transport
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
"""
template = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
mac-address {{ mac_address }}
</group>
</group>
"""
parser = ttp(data, template, log_level="error")
parser.parse()
res = parser.result()
# pprint.pprint(res, width=80)
assert res == [
[
{
"interfaces": [
{"interface": "TenGigE0/0/0/5.100"},
{
"interface": "BVI101",
"ipv4": [{"ipv4": "192.168.101.1 255.255.255.0"}],
"mac_address": "200.b19.4321",
},
]
}
]
]
# test_interface_template_not_collecting_all_data_reduced()
@pytest.mark.skipif(True, reason="Need to fix this one")
def test_interface_template_not_collecting_all_data_reduced_2():
"""
Below template and data produce this result:
[[{'interfaces': [{'interface': 'TenGigE0/0/0/5'},
{'interface': 'TenGigE0/0/0/5.100',
'mac_address': '200.b19.1234'},
{'interface': 'BVI101',
'ipv4': [{'ipv4': '192.168.101.1 255.255.255.0'}]},
{'interface': 'HundredGigE0/0/1/0',
'mac_address': '200.b19.5678'}]}]]
Interface BVI should not have an IPv4 address matched, but
should have its mac-address matched. The problem is that the
l2transport group starts and it has a group for IPv4 addresses;
the next match after the IPv4 match is mac-address, but its parent
is a different group. As a result the IPv4 address is saved under the
wrong group and the mac-address is not saved at all.
IDEA: try to implement automatic end-of-group tracking, to add previous
groups to self.ended_groups if a next, different group starts.
The current workaround for this problem is to use _end_ to explicitly
indicate the end of the group.
"""
data = """
interface TenGigE0/0/0/5
!
interface TenGigE0/0/0/5.100 l2transport
mac-address 200.b19.1234
!
interface BVI101
ipv4 address 192.168.101.1 255.255.255.0
mac-address 200.b19.4321
!
interface HundredGigE0/0/1/0
mac-address 200.b19.5678
!
"""
template_original = """
<group name="interfaces">
## matches primary interfaces
<group>
interface {{ interface }}
mac-address {{ mac_address }}
</group>
## matches sub-interfaces
<group>
interface {{ interface }} l2transport
<group name="ipv4*" method="table" containsall="ipv4">
ipv4 address {{ ipv4 | _line_ | _exact_ }}
</group>
</group>
</group>
"""
parser = ttp(data, template_original, log_level="error")
parser.parse()
res = parser.result()
pprint.pprint(res, width=80)
# test_interface_template_not_collecting_all_data_reduced_2()
def test_issue_61():
data = """
banner motd &
BANNER MESSAGE line 1
BANNER MESSAGE line 2
BANNER MESSAGE line 3
&
some
other staff
"""
template_to_match_marker = "banner motd {{ marker }}"
template_to_parse_banner = """
<group name="motd">
banner motd {{ ignore(banner_marker) }} {{ _start_ }}
{{ banner_mesage | _line_ | joinmatches("\\n") }}
{{ ignore(banner_marker) }} {{ _end_ }}
</group>
"""
# extract marker value
parser = ttp(data, template_to_match_marker)
parser.parse()
marker = parser.result()[0][0]["marker"]
# parse banner
parser = ttp(data, template_to_parse_banner, vars={"banner_marker": marker})
parser.parse()
res = parser.result()
pprint.pprint(res)
assert res == [[{'motd': {'banner_mesage': 'BANNER MESSAGE line 1\n'
'BANNER MESSAGE line 2\n'
'BANNER MESSAGE line 3'}}]]
# test_issue_61()
def test_fortigate_intf_parsing():
template = """
<group name="interfaces">
config system interface {{ _start_ }}
<group name="/interfaces*">
edit "{{ interface }}"
set allowaccess {{ allowaccess }}
set description "{{ description }}"
set interface "{{ phy_interface }}"
set snmp-index {{ snmp_index }}
set type {{ fgt_int_type }}
set vdom "{{ vdom }}"
set vlanid {{ vlan }}
next {{ _end_ }}
</group>
end {{ _end_ }}
</group>
"""
data = """
config system np6
edit "np6_0"
next
end
config system interface
edit "mgmt1"
set vdom "root"
set ip 10.10.10.1 255.255.255.248
set allowaccess ping
set type physical
set description "mgmt1"
set snmp-index 1
next
edit "port1"
set vdom "internal"
set ip 20.20.20.1 255.255.255.248
set allowaccess ping
set type physical
set snmp-index 2
next
end
config system custom-language
edit "en"
set filename "en"
next
edit "fr"
set filename "fr"
next
end
"""
parser = ttp(data, template)
parser.parse()
res = parser.result()
pprint.pprint(res)
assert res == [[{'interfaces': [{'allowaccess': 'ping',
'description': 'mgmt1',
'fgt_int_type': 'physical',
'interface': 'mgmt1',
'snmp_index': '1',
'vdom': 'root'},
{'allowaccess': 'ping',
'fgt_int_type': 'physical',
'interface': 'port1',
'snmp_index': '2',
'vdom': 'internal'}]}]]
# test_fortigate_intf_parsing()
def test_issue_57_one_more():
"""
Without the _anonymous_ group id formation bug fix
the below template/data were producing this result:
[[{'portchannel': {'1': {'local_members': [{}],
'remote_members': [{'flag': '{EF}',
'interface': 'GE6/0/1',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/2',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]},
'2': {'local_members': [{}],
'remote_members': [{'flag': '{EF}',
'interface': 'GE6/0/3',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'},
{'flag': '{EF}',
'interface': 'GE6/0/4',
'mac': '0000-0000-0000',
'oper_key': '0',
'priority': '32768',
'status': '0',
'sys_id': '0x8000'}]}}}]]
Further debugging revealed a flaw in the results selection logic:
due to the exclude("Port") statements the group was invalidated and the anonymous
group_id was the same as the parent group_id, so new anonymous group matches were
not able to restart the group. Fixed by changing the way the anonymous group id is formed.
Before fix:
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
After fix:
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: {('portchannel.{{channel_number}}.local_members*._anonymous_', 0)}
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
self.ended_groups: set()
re_["GROUP"].group_id: ('portchannel.{{channel_number}}.local_members*._anonymous_', 0)
re_["GROUP"].parent_group_id: ('portchannel.{{channel_number}}.local_members*', 0)
"""
data = """
Loadsharing Type: Shar -- Loadsharing, NonS -- Non-Loadsharing
Port Status: S -- Selected, U -- Unselected,
I -- Individual, * -- Management port
Flags: A -- LACP_Activity, B -- LACP_Timeout, C -- Aggregation,
D -- Synchronization, E
'cleanButton: ' + str(self.cleanButton) + '\n'
s += 'maxButton: ' + str(self.maxButton) + '\n'
s += 'distance: ' + str(self.distance) + '\n'
s += 'rawAngle: ' + str(self.rawAngle) + '\n'
s += 'angleInRadians: ' + str(self.angleInRadians) + '\n'
# no data member needed for this next line
s += 'angleInDegrees: ' + str(math.degrees(self.angleInRadians)) + '\n'
s += 'chargingState: ' + str(self.chargingState) + '\n'
s += 'voltage: ' + str(self.voltage) + '\n'
s += 'current: ' + str(self.current) + '\n'
s += 'temperature: ' + str(self.temperature) + '\n'
s += 'charge: ' + str(self.charge) + '\n'
s += 'capacity: ' + str(self.capacity) + '\n'
return s
def _toBinaryString(self):
""" this converts the calling SensorFrame into a 26-byte
string of the format the roomba sends back
"""
# todo: handle the different subsets (frames) of sensor data
# here are the 26 bytes in list form
slist = [0]*26
# First Frame
# byte 0: bumps and wheeldrops
slist[0] = self.casterDrop << 4 | \
self.leftWheelDrop << 3 | \
self.rightWheelDrop << 2 | \
self.leftBump << 1 | \
self.rightBump
# byte 1: wall data
slist[1] = self.wallSensor
# byte 2: cliff left
slist[2] = self.leftCliff
# byte 3: cliff front left
slist[3] = self.frontLeftCliff
# byte 4: cliff front right
slist[4] = self.frontRightCliff
# byte 5: cliff right
slist[5] = self.rightCliff
# byte 6: virtual wall
slist[6] = self.virtualWall
# byte 7: motor overcurrents
slist[7] = self.driveLeft << 4 | \
self.driveRight << 3 | \
self.mainBrush << 2 | \
self.vacuum << 1 | \
self.sideBrush
# byte 8: dirt detector left
slist[8] = self.leftDirt
# byte 9: dirt detector right
slist[9] = self.rightDirt
# Second Frame
# byte 10: remote control command
slist[10] = self.remoteControlCommand
# byte 11: buttons
slist[11] = self.powerButton << 3 | \
self.spotButton << 2 | \
self.cleanButton << 1 | \
self.maxButton
# bytes 12, 13: distance
highVal, lowVal = _toTwosComplement2Bytes( self.distance )
slist[12] = highVal
slist[13] = lowVal
# bytes 14, 15: angle
highVal, lowVal = _toTwosComplement2Bytes( self.rawAngle )
slist[14] = highVal
slist[15] = lowVal
# Third Frame
# byte 16: charging state
slist[16] = self.chargingState
# bytes 17, 18: voltage
slist[17] = (self.voltage >> 8) & 0xFF
slist[18] = self.voltage & 0xFF
# bytes 19, 20: current
highVal, lowVal = _toTwosComplement2Bytes( self.current )
slist[19] = highVal
slist[20] = lowVal
# byte 21: temperature
slist[21] = self.temperature
# bytes 22, 23: charge
slist[22] = (self.charge >> 8) & 0xFF
slist[23] = self.charge & 0xFF
# bytes 24, 25: capacity
slist[24] = (self.capacity >> 8) & 0xFF
slist[25] = self.capacity & 0xFF
# convert to a string
s = ''.join([ chr(x) for x in slist ])
return s
#
# the robot class
#
class Create:
""" the Create class is an abstraction of the iRobot Create's
SCI interface, including communication and a bit
of processing of the strings passed back and forth
when you create an object of type Create, the code
will try to open a connection to it - so, it will fail
if it's not attached!
"""
# to do: check if we can start in other modes...
def __init__(self, PORT, BAUD_RATE=115200, startingMode=SAFE_MODE):
""" the constructor which tries to open the
connection to the robot at port PORT
"""
self._debug = False
# to do: find the shortest safe serial timeout value...
# to do: use the timeout to do more error checking than
# is currently done...
#
# the -1 here is because windows starts counting from 1
# in the hardware control panel, but not in pyserial, it seems
# if PORT is the string 'simulated' (or any string for the moment)
# we use our SRSerial class
print 'PORT is', PORT
if type(PORT) == type('string'):
if PORT == 'sim':
print 'In simulated mode...'
self.ser = 'sim'; # SRSerial('mapSquare.txt')
else:
# for Mac/Linux - use whole port name
# print 'In Mac/Linux mode...'
self.ser = serial.Serial(PORT, baudrate=BAUD_RATE, timeout=0.5)
# otherwise, we try to open the numeric serial port...
else:
# print 'In Windows mode...'
self.ser = serial.Serial(PORT-1, baudrate=BAUD_RATE, timeout=0.5)
# did the serial port actually open?
if self.ser != 'sim' and self.ser.isOpen():
print 'Serial port did open, presumably to a roomba...'
else:
print 'Serial port did NOT open, check the'
print ' - port number'
print ' - physical connection'
print ' - baud rate of the roomba (it\'s _possible_, if unlikely,'
print ' that it might be set to 19200 instead'
print ' of the default 57600 - removing and'
print ' reinstalling the battery should reset it.'
# our OI mode
self.sciMode = OFF_MODE
# our sensor dictionary, currently empty
self.sensord = {}
# here are the variables that constitute the robot's
# estimated odometry, thr is theta in radians...
# these are updated by integrateNextOdometricStep
self.xPose = 0.0
self.yPose = 0.0
self.thrPose = 0.0
self.leftEncoder = -1
self.rightEncoder = -1
self.leftEncoder_old = -1
self.rightEncoder_old = -1
time.sleep(0.3)
self._start() # go to passive mode - want to do this
# regardless of the final mode we'd like to be in...
time.sleep(0.3)
if (startingMode == SAFE_MODE):
print 'Putting the robot into safe mode...'
self.toSafeMode()
if (startingMode == FULL_MODE):
print 'Putting the robot into full mode...'
self.toSafeMode()
time.sleep(0.3)
self.toFullMode()
# We need to read the angle and distance sensors so that
# their values clear out!
time.sleep(0.25)
#self.sensors(6) # read all sensors to establish the sensord dictionary
self.setPose(0,0,0)
self._debug = False
def _write(self, byte):
if self._debug==True:
print ord(byte)
self.ser.write(byte)
def getPose(self, dist='cm', angle='deg'):
""" getPose returns the current estimate of the
robot's global pose
dist may be 'cm' or 'mm'
angle may be 'deg' or 'rad'
"""
x = 0; y = 0; th = 0
if dist == 'cm':
x = self.xPose/10.0; y = self.yPose/10.0
else:
x = self.xPose; y = self.yPose
if angle == 'deg':
th = math.degrees(self.thrPose)
else:
th = self.thrPose
return (x,y,th)
def setPose(self, x, y, th, dist='cm', angle='deg'):
""" setPose sets the internal odometry to the input values
x: global x in mm
y: global y in mm
th: global th in radians
dist: 'cm' or 'mm' for x and y
angle: 'deg' or 'rad' for th
"""
if dist == 'cm':
self.xPose = x*10.0; self.yPose = y*10.0
else:
self.xPose = x; self.yPose = y
if angle == 'deg':
self.thrPose = math.radians(th)
else:
self.thrPose = th
def resetPose(self):
""" resetPose simply sets the internal odometry to 0,0,0
"""
self.setPose(0.0,0.0,0.0)
def _getEncoderDelta(self, oldEnc, newEnc):
#encoder wrap around at 2^16
#check if the step is bigger than half the
#possible range and treat this as wraparound
delta = newEnc-oldEnc
if delta< -65536/2:
delta = (newEnc+65536)-oldEnc
if delta> 65536/2:
delta = newEnc-(oldEnc+65536)
return delta
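# Example of the wraparound handling above (illustrative): oldEnc=65530, newEnc=4
# gives a raw delta of -65526, which is below -65536/2, so it is corrected to
# (4+65536)-65530 = +10 ticks of forward motion.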
def _integrateNextEncoderStep(self):
if self.leftEncoder_old == -1:
self.leftEncoder_old = self.leftEncoder
self.rightEncoder_old = self.rightEncoder
return
left_diff = self._getEncoderDelta(self.leftEncoder_old,self.leftEncoder)
right_diff = self._getEncoderDelta(self.rightEncoder_old,self.rightEncoder)
left_mm = left_diff / TICK_PER_MM;
right_mm = right_diff / TICK_PER_MM;
distance = (left_mm + right_mm) / 2.0;
dAngle = (right_mm - left_mm) / WHEEL_SPAN
dAngle *= ANGULAR_ERROR
self.thrPose += dAngle
if self.thrPose > 100*math.pi:
self.thrPose -= 101*math.pi
if self.thrPose < -100*math.pi:
self.thrPose += 101*math.pi
self.xPose += distance * math.cos(self.thrPose)
self.yPose += distance * math.sin(self.thrPose)
self.leftEncoder_old = self.leftEncoder
self.rightEncoder_old = self.rightEncoder
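# Note (illustrative): per the formulas above, equal wheel travel advances the pose
# straight along the current heading, while a right-minus-left difference of d mm
# turns the robot by roughly d/WHEEL_SPAN radians (scaled by ANGULAR_ERROR).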
def _integrateNextOdometricStepCreate(self, distance, rawAngle):
""" integrateNextOdometricStep adds the reported inputs
distance in mm
rawAngle in degrees
to the estimate of the robot's global pose
"""
# OK, so this _should_ be easy
# distance is, supposedly, the arc length that the center
# of the robot has traveled (the average of
# the two wheel's linear distances)
#
| |
important build fails to build."""
pass
class LKGMCandidateSyncCompletionStage(ManifestVersionedSyncCompletionStage):
"""Stage that records whether we passed or failed to build/test manifest."""
def _GetSlavesStatus(self):
if self._options.debug:
# In debug mode, nothing is uploaded to Google Storage, so we bypass
# the extra hop and just look at what we have locally.
status = manifest_version.BuilderStatus.GetCompletedStatus(self.success)
status_obj = manifest_version.BuilderStatus(status, self.message)
return {self._bot_id: status_obj}
elif not self._build_config['master']:
# Slaves only need to look at their own status.
return ManifestVersionedSyncStage.manifest_manager.GetBuildersStatus(
[self._bot_id])
elif not LKGMCandidateSyncStage.sub_manager:
return ManifestVersionedSyncStage.manifest_manager.GetBuildersStatus(
self._GetSlavesForMaster())
else:
public_builders, private_builders = self._GetSlavesForUnifiedMaster()
statuses = {}
if public_builders:
statuses.update(
LKGMCandidateSyncStage.sub_manager.GetBuildersStatus(
public_builders))
if private_builders:
statuses.update(
ManifestVersionedSyncStage.manifest_manager.GetBuildersStatus(
private_builders))
return statuses
def HandleSuccess(self):
# We only promote for the pfq, not chrome pfq.
# TODO(build): Run this logic in debug mode too.
if (not self._options.debug and
cbuildbot_config.IsPFQType(self._build_config['build_type']) and
self._build_config['master'] and
self._target_manifest_branch == 'master' and
ManifestVersionedSyncStage.manifest_manager != None and
self._build_config['build_type'] != constants.CHROME_PFQ_TYPE):
ManifestVersionedSyncStage.manifest_manager.PromoteCandidate()
if LKGMCandidateSyncStage.sub_manager:
LKGMCandidateSyncStage.sub_manager.PromoteCandidate()
def HandleValidationFailure(self, failing_statuses):
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Warning('\n'.join([
'The following builders failed with this manifest:',
', '.join(sorted(failing_statuses.keys())),
'Please check the logs of the failing builders for details.']))
def HandleValidationTimeout(self, inflight_statuses):
cros_build_lib.PrintBuildbotStepWarnings()
cros_build_lib.Warning('\n'.join([
'The following builders took too long to finish:',
', '.join(sorted(inflight_statuses.keys())),
'Please check the logs of these builders for details.']))
def _PerformStage(self):
if ManifestVersionedSyncStage.manifest_manager:
ManifestVersionedSyncStage.manifest_manager.UploadStatus(
success=self.success, message=self.message)
statuses = self._GetSlavesStatus()
failing_build_dict, inflight_build_dict = {}, {}
for builder, status in statuses.iteritems():
if status.Failed():
failing_build_dict[builder] = status
elif status.Inflight():
inflight_build_dict[builder] = status
if failing_build_dict or inflight_build_dict:
if failing_build_dict:
self.HandleValidationFailure(failing_build_dict)
if inflight_build_dict:
self.HandleValidationTimeout(inflight_build_dict)
if failing_build_dict or inflight_build_dict:
raise results_lib.StepFailure()
else:
self.HandleSuccess()
class CommitQueueCompletionStage(LKGMCandidateSyncCompletionStage):
"""Commits or reports errors to CL's that failed to be validated."""
def HandleSuccess(self):
if self._build_config['master']:
CommitQueueSyncStage.pool.SubmitPool()
# After submitting the pool, update the commit hashes for uprevved
# ebuilds.
portage_utilities.EBuild.UpdateCommitHashesForChanges(
CommitQueueSyncStage.pool.changes, self._build_root)
if cbuildbot_config.IsPFQType(self._build_config['build_type']):
super(CommitQueueCompletionStage, self).HandleSuccess()
def HandleValidationFailure(self, failing_statuses):
"""Sends the failure message of all failing builds in one go."""
super(CommitQueueCompletionStage, self).HandleValidationFailure(
failing_statuses)
if self._build_config['master']:
failing_messages = [x.message for x in failing_statuses.itervalues()]
CommitQueueSyncStage.pool.HandleValidationFailure(failing_messages)
def HandleValidationTimeout(self, inflight_builders):
super(CommitQueueCompletionStage, self).HandleValidationTimeout(
inflight_builders)
CommitQueueSyncStage.pool.HandleValidationTimeout()
def _PerformStage(self):
if not self.success and self._build_config['important']:
# This message is sent along with the failed status to the master to
# indicate a failure.
self.message = CommitQueueSyncStage.pool.GetValidationFailedMessage()
super(CommitQueueCompletionStage, self)._PerformStage()
class RefreshPackageStatusStage(bs.BuilderStage):
"""Stage for refreshing Portage package status in online spreadsheet."""
def _PerformStage(self):
commands.RefreshPackageStatus(buildroot=self._build_root,
boards=self._boards,
debug=self._options.debug)
class BuildBoardStage(bs.BuilderStage):
"""Stage that is responsible for building host pkgs and setting up a board."""
option_name = 'build'
def __init__(self, options, build_config, boards=None):
super(BuildBoardStage, self).__init__(options, build_config)
if boards is not None:
self._boards = boards
def _PerformStage(self):
chroot_upgrade = True
env = {}
if self._options.clobber:
env['IGNORE_PREFLIGHT_BINHOST'] = '1'
latest_toolchain = self._build_config['latest_toolchain']
if latest_toolchain and self._build_config['gcc_githash']:
env['USE'] = 'git_gcc'
env['GCC_GITHASH'] = self._build_config['gcc_githash']
chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
if not os.path.isdir(chroot_path) or self._build_config['chroot_replace']:
commands.MakeChroot(
buildroot=self._build_root,
replace=self._build_config['chroot_replace'],
use_sdk=self._build_config['use_sdk'],
chrome_root=self._options.chrome_root,
extra_env=env)
chroot_upgrade = False
else:
commands.RunChrootUpgradeHooks(self._build_root)
# Iterate through boards to setup.
for board_to_build in self._boards:
# Only build the board if the directory does not exist.
board_path = os.path.join(chroot_path, 'build', board_to_build)
if os.path.isdir(board_path):
continue
commands.SetupBoard(self._build_root,
board=board_to_build,
usepkg=self._build_config['usepkg_setup_board'],
latest_toolchain=latest_toolchain,
extra_env=env,
profile=self._options.profile or
self._build_config['profile'],
chroot_upgrade=chroot_upgrade)
chroot_upgrade = False
class UprevStage(bs.BuilderStage):
"""Stage that uprevs Chromium OS packages that the builder intends to
validate.
"""
option_name = 'uprev'
def __init__(self, options, build_config, boards=None, enter_chroot=True):
super(UprevStage, self).__init__(options, build_config)
self._enter_chroot = enter_chroot
if boards is not None:
self._boards = boards
def _PerformStage(self):
# Perform chrome uprev.
chrome_atom_to_build = None
if self._chrome_rev:
# TODO(build): If anyone wants this to run outside of the chroot, we'll
# need to update a few things first. But no one does, so we'll leave it.
chrome_atom_to_build = commands.MarkChromeAsStable(
self._build_root, self._target_manifest_branch,
self._chrome_rev, self._boards,
chrome_version=self._options.chrome_version)
useflags = self._build_config['useflags'] or []
pgo_generate = constants.USE_PGO_GENERATE in useflags
# Perform other uprevs.
if self._build_config['uprev']:
overlays, _ = self._ExtractOverlays()
commands.UprevPackages(self._build_root,
self._boards,
overlays,
enter_chroot=self._enter_chroot)
elif self._chrome_rev and not chrome_atom_to_build and not pgo_generate:
# TODO(sosa): Do this in a better way.
sys.exit(0)
class SyncChromeStage(bs.BuilderStage):
"""Stage that syncs Chrome sources if needed."""
option_name = 'managed_chrome'
def _GetArchitectures(self):
"""Get the list of architectures built by this builder."""
return set(self._GetPortageEnvVar('ARCH', b) for b in self._boards)
def _PerformStage(self):
kwargs = {}
if self._chrome_rev == constants.CHROME_REV_SPEC:
kwargs['revision'] = self._options.chrome_version
cpv = None
cros_build_lib.PrintBuildbotStepText('revision %s' % kwargs['revision'])
else:
cpv = portage_utilities.BestVisible(constants.CHROME_CP,
buildroot=self._build_root)
kwargs['tag'] = cpv.version_no_rev.partition('_')[0]
cros_build_lib.PrintBuildbotStepText('tag %s' % kwargs['tag'])
useflags = self._build_config['useflags'] or []
commands.SyncChrome(self._build_root, self._options.chrome_root, useflags,
**kwargs)
if constants.USE_PGO_USE in useflags and cpv is not None:
commands.WaitForPGOData(self._GetArchitectures(), cpv)
if (constants.USE_PGO_GENERATE in useflags and cpv is not None and
commands.CheckPGOData(self._GetArchitectures(), cpv)):
cros_build_lib.Info('PGO data already generated')
sys.exit(0)
class PatchChromeStage(bs.BuilderStage):
"""Stage that applies Chrome patches if needed."""
option_name = 'rietveld_patches'
def _PerformStage(self):
for patch in ' '.join(self._options.rietveld_patches).split():
patch, colon, subdir = patch.partition(':')
if not colon:
subdir = 'src'
commands.PatchChrome(self._options.chrome_root, patch, subdir)
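# Illustration (hypothetical values): a rietveld_patches value such as
# ['12345:src/third_party', '67890'] applies patch 12345 under src/third_party
# and patch 67890 under the default 'src' subdirectory.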
class BuildTargetStage(BoardSpecificBuilderStage):
"""This stage builds Chromium OS for a target.
Specifically, we build Chromium OS packages and perform imaging to get
the images we want per the build spec."""
option_name = 'build'
def __init__(self, options, build_config, board, archive_stage, version):
super(BuildTargetStage, self).__init__(options, build_config, board)
self._env = {}
if self._build_config.get('useflags'):
self._env['USE'] = ' '.join(self._build_config['useflags'])
if self._options.chrome_root:
self._env['CHROME_ORIGIN'] = 'LOCAL_SOURCE'
if self._options.clobber:
self._env['IGNORE_PREFLIGHT_BINHOST'] = '1'
self._archive_stage = archive_stage
self._tarball_dir = None
self._version = version if version else ''
def _CommunicateVersion(self):
"""Communicates to archive_stage the image path of this stage."""
verinfo = manifest_version.VersionInfo.from_repo(self._build_root)
if self._version:
version = self._version
else:
version = verinfo.VersionString()
version = 'R%s-%s' % (verinfo.chrome_branch, version)
# Non-versioned builds need the build number to uniquify the image.
if not self._version:
version += '-b%s' % self._options.buildnumber
self._archive_stage.SetVersion(version)
def HandleSkip(self):
self._CommunicateVersion()
def _BuildImages(self):
# We only build base, dev, and test images from this stage.
images_can_build = set(['base', 'dev', 'test'])
images_to_build = set(self._build_config['images']).intersection(
images_can_build)
rootfs_verification = self._build_config['rootfs_verification']
commands.BuildImage(self._build_root,
self._current_board,
list(images_to_build),
rootfs_verification=rootfs_verification,
version=self._version,
disk_layout=self._build_config['disk_layout'],
extra_env=self._env)
if self._build_config['vm_tests']:
commands.BuildVMImageForTesting(
self._build_root,
self._current_board,
disk_layout=self._build_config['disk_vm_layout'],
extra_env=self._env)
# Update link to latest image.
latest_image = os.readlink(self.GetImageDirSymlink('latest'))
cbuildbot_image_link = self.GetImageDirSymlink()
if os.path.lexists(cbuildbot_image_link):
os.remove(cbuildbot_image_link)
os.symlink(latest_image, cbuildbot_image_link)
self._CommunicateVersion()
def _BuildAutotestTarballs(self):
# Build autotest tarball, which is used in archive step. This is generated
# here because the test directory is modified during the test phase, and we
# don't want to include the modifications in the tarball.
tarballs = commands.BuildAutotestTarballs(self._build_root,
self._current_board,
self._tarball_dir)
self._archive_stage.AutotestTarballsReady(tarballs)
def _BuildFullAutotestTarball(self):
# Build a full autotest tarball for hwqual image. This tarball is to be
# archived locally.
tarball = commands.BuildFullAutotestTarball(self._build_root,
self._current_board,
self._tarball_dir)
self._archive_stage.FullAutotestTarballReady(tarball)
def _PerformStage(self):
build_autotest = (self._build_config['build_tests'] and
self._options.tests)
# If we are using ToT toolchain, don't attempt to update
# the toolchain during build_packages.
skip_toolchain_update = self._build_config['latest_toolchain']
commands.Build(self._build_root,
self._current_board,
build_autotest=build_autotest,
skip_toolchain_update=skip_toolchain_update,
usepkg=self._build_config['usepkg_build_packages'],
nowithdebug=self._build_config['nowithdebug'],
packages=self._build_config['packages'],
chrome_root=self._options.chrome_root,
extra_env=self._env)
# Build images and autotest tarball in parallel.
steps = []
if build_autotest and (self._build_config['upload_hw_test_artifacts'] or
self._build_config['archive_build_debug']):
self._tarball_dir = tempfile.mkdtemp(prefix='autotest')
steps.append(self._BuildAutotestTarballs)
# Build a full autotest tarball only for chromeos_official builds
if self._build_config['chromeos_official']:
steps.append(self._BuildFullAutotestTarball)
else:
self._archive_stage.AutotestTarballsReady(None)
if self._build_config['images']:
steps.append(self._BuildImages)
else:
self._CommunicateVersion()
parallel.RunParallelSteps(steps)
# TODO(yjhong): Remove this and instruct archive_hwqual to copy the tarball
# directly.
if self._tarball_dir and self._build_config['chromeos_official']:
shutil.copyfile(os.path.join(self._tarball_dir, 'autotest.tar.bz2'),
os.path.join(self.GetImageDirSymlink(),
'autotest.tar.bz2'))
def _HandleStageException(self, exception):
# In case of an exception, this prevents any consumer from starving.
self._archive_stage.AutotestTarballsReady(None)
return super(BuildTargetStage, self)._HandleStageException(exception)
class SignerTestStage(BoardSpecificBuilderStage):
"""Run signer related tests."""
option_name = 'tests'
config_name = 'signer_tests'
# If the signer tests take longer than 30 minutes, abort. They usually take
# five minutes to run.
SIGNER_TEST_TIMEOUT = 1800
def __init__(self, options, build_config, board, archive_stage):
super(SignerTestStage, self).__init__(options, build_config, board)
self._archive_stage = archive_stage
def _PerformStage(self):
if not self._archive_stage.WaitForRecoveryImage():
raise InvalidTestConditionException('Missing recovery image.')
with cros_build_lib.SubCommandTimeout(self.SIGNER_TEST_TIMEOUT):
commands.RunSignerTests(self._build_root,
self._current_board)
class UnitTestStage(BoardSpecificBuilderStage):
"""Run unit tests."""
option_name = 'tests'
config_name = 'unittests'
# If the unit tests take longer than 60 minutes, abort. They usually take
# ten minutes to run.
UNIT_TEST_TIMEOUT = 3600
def _PerformStage(self):
with cros_build_lib.SubCommandTimeout(self.UNIT_TEST_TIMEOUT):
commands.RunUnitTests(self._build_root,
self._current_board,
full=(not self._build_config['quick_unit']),
nowithdebug=self._build_config['nowithdebug'])
class VMTestStage(BoardSpecificBuilderStage):
"""Run autotests in a virtual machine."""
option_name = 'tests'
config_name = 'vm_tests'
def __init__(self, options, build_config, board, archive_stage):
super(VMTestStage, self).__init__(options, build_config, board)
self._archive_stage = archive_stage
def PrintBuildbotLink(self, download_url, filename):
"""Print a link to an artifact in Google Storage.
Args:
download_url: The directory this file can be downloaded from.
filename: The filename of the uploaded file.
"""
url = '%s/%s' % (download_url.rstrip('/'), filename)
text = filename
if filename.endswith('.dmp.txt'):
text = 'crash: %s' % filename
cros_build_lib.PrintBuildbotLink(text, url)
def _ArchiveTestResults(self, test_results_dir):
from numpy import zeros, ones, ndarray, average
from networkx import adjacency_matrix, Graph
from scipy.sparse import diags, lil_matrix, spmatrix
from scipy.sparse.linalg import lsqr
def forward_hierarchical_levels(graph, weight=None):
"""Returns the forward hierarchical levels of the nodes of a network as an array.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if a graph object is given, otherwise in the same order as the numpy/sparse array, holding the value of their forward hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph.transpose()
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
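# Minimal usage sketch (illustrative): for a directed chain a -> b -> c, e.g.
# networkx.DiGraph([("a", "b"), ("b", "c")]), the forward hierarchical levels
# increase by one along the chain; with lsqr's minimum-norm solution this comes
# out to roughly [-1, 0, 1] for nodes (a, b, c).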
def backward_hierarchical_levels(graph, weight=None):
"""Returns the backward hierarchical levels of the nodes of a network as an array. This is the transpose of the original graph, so out-edges now become in-edges.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes if a graph object is given, otherwise in the same order as the numpy/sparse array, holding the value of their backward hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph
k_in = A.sum(axis=1)
elif isinstance(graph, spmatrix):
A = graph
k_in = A.sum(axis=1).A1
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
k_in = A.sum(axis=1).A1
D_in = diags(k_in, 0)
L_in = D_in - A
return lsqr(L_in, k_in)[0]
def hierarchical_levels(graph, weight=None):
"""Returns the hierarchical levels of the nodes of a network as an array which aids visualisation of the hierarchical structure in the network.
Parameters
----------
graph : Graph
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
hierarchical levels : array
A Nx1 dimensional array indexed by the nodes, in the same order as graph.nodes, holding the value of their hierarchical levels.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
return 0.5*(forward_hierarchical_levels(graph, weight=weight) - backward_hierarchical_levels(graph, weight=weight))
def sparse_forward_hierarchical_differences(graph, weight=None):
''' Just a copy of the forward hierarchical differences function that returns the sparse matrix, instead of the dense representation, in lil format'''
if isinstance(graph, (ndarray, spmatrix)):
A = graph.transpose()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
s = forward_hierarchical_levels(graph, weight=weight)
TD = lil_matrix(A.shape, dtype=float)
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
TD[i,j] = s[i] - s[j]
return TD
def forward_hierarchical_differences(graph, weight=None):
"""Returns the forward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical differences : array
A NxN dimensional array representing a weighted adjacency matrix, with the edge weights corresponding to the forward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
TD = sparse_forward_hierarchical_differences(graph, weight=weight)
return TD.toarray()
def sparse_backward_hierarchical_differences(graph, weight=None):
''' Just a copy of the backward hierarchical differences function that returns the sparse matrix, instead of the dense representation, in lil format'''
if isinstance(graph, (ndarray, spmatrix)):
A = graph
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight)
s = backward_hierarchical_levels(graph, weight=weight)
TD = lil_matrix(A.shape, dtype=float)
for i, j in zip(A.nonzero()[0], A.nonzero()[1]):
TD[i,j] = s[i] - s[j]
return TD
def backward_hierarchical_differences(graph, weight=None):
"""Returns the backward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical differences : array
A NxN dimensional array representing a weighted adjacency matrix, with the edge weights corresponding to the backward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
TD = sparse_backward_hierarchical_differences(graph, weight=weight)
return TD.toarray()
def forward_hierarchical_incoherence(graph, weight=None):
"""Returns the forward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix,
mean of the distribution of differences and standard deviation of this distribution.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string or None
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
forward hierarchical differences : sparse array
A NxN dimensional sparse array representing a weighted adjacency matrix, with the edge weights corresponding to the forward hierarchical differences.
The column index represents the source node of the edge and the row index represents the destination node of the edge.
mean hierarchical difference : float
The mean of the distribution of forward hierarchical differences.
forward hierarchical incoherence : float
The standard deviation of the distribution of forward hierarchical differences.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2019).
Graph hierarchy and spread of infections.
arXiv preprint arXiv:1908.04358."""
if isinstance(graph, ndarray):
A = graph.transpose()
TD = forward_hierarchical_differences(graph, weight=weight)
m = average(TD, weights=A)
m2 = average(TD**2, weights=A)
elif isinstance(graph, spmatrix):
A = graph.transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
elif isinstance(graph, Graph):
A = adjacency_matrix(graph, weight=weight).transpose()
TD = sparse_forward_hierarchical_differences(graph, weight=weight).tocsc()
m = (A.multiply(TD)).sum() / A.sum()
m2 = (A.multiply(TD.power(2))).sum() / A.sum()
std = (m2 - m**2)**0.5
return TD, m, std
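# Note (illustrative): since the incoherence is the weighted standard deviation of
# the forward differences, a perfectly hierarchical graph in which every edge has
# a forward difference of exactly 1 gives m = 1, m2 = 1 and an incoherence of 0.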
def backward_hierarchical_incoherence(graph, weight=None):
"""Returns the backward hierarchical differences over the edges of a network in the form of a weighted adjacency matrix,
mean of the distribution of differences and standard deviation of this distribution.
Parameters
----------
graph : Graph, array
A NetworkX graph or numpy/sparse array
weight : string
If you have weighted edges insert weight='string', where string is your underlying weight attribute. Only relevant if graph object is a networkx
graph instance. Otherwise the default is None.
Returns
-------
backward hierarchical
"""weighted_graph.py: This file is part of the feyncop/feyngen package.
Implements the WeightedGraph class. """
# See also: http://people.physik.hu-berlin.de/~borinsky/
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright (C) 2014 <NAME>"
__license__ = "MIT License"
__version__ = "1.0"
# Copyright (c) 2014 <NAME>
# This program is distributed under the MIT License:
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from math import *
import copy, itertools
from stuff import *
from graph import Graph
class WeightedGraph(Graph):
"""This class extends the basic utilities in the Graph class by the tools
to handle QED and Yang-Mills graphs."""
def __init__( self, edges, edge_weights, symmetry_factor=0 ):
"""Initializes the WeightedGraph class. Edges, edge_weights and
symmetry_factor can be provided."""
        if len(edges) != len(edge_weights):
            raise ValueError("edges and edge_weights must have the same length")
super(WeightedGraph, self).__init__( edges, symmetry_factor )
self.edge_weights = edge_weights
def get_edge_str( self, e ):
"""Return a readable string of the edges of the graph."""
v1,v2 = self.edges[e]
w = self.edge_weights[e]
wDict = [ '0', 'f', 'A', 'c' ]
return "[%d,%d,%c]" % (v1,v2,wDict[w])
def get_edges_tuple( self ):
"""Get a unique tuple to identify the graph. (Unique only for every labeling)."""
return tuple( sorted( ( tuple( sorted(edge) if w==2 else edge ), w) for edge,w in zip(self.edges,self.edge_weights) ) )
def graph_from_sub_edges( self, sub_edges ):
"""Create a new graph from a sub set of its edges."""
sub_graph = super(WeightedGraph, self).graph_from_sub_edges( sub_edges )
sub_graph.edge_weights = tuple( self.edge_weights[e] for e in sorted(sub_edges) )
return sub_graph
def sub_edges_by_weight( self, weight ):
"""Returns all subedges with a certain weight."""
return frozenset( e for e,w in enumerate(self.edge_weights) if w == weight )
@property
def residue_type( self ):
"""Returns the residue type of the graph."""
def dir_e(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return -1
else: return 1
ext_types = [ dir_e(e,v) * self.edge_weights[e] for v in self.external_vtcs_set for e in self.adj_edges( v, self.edges_set ) ]
return tuple(sorted(ext_types))
def get_vtx_type( self, v ):
"""Returns the type of the vertex v in the same format as
residue_type."""
def dir1(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return -1
else: return 1
def dir2(e, v):
if self.edge_weights[e] == 2: return 1
if v == self.edges[e][0]: return 1
else: return -1
adj_types = [ dir1(e,v)*self.edge_weights[e] for e in self.adj_edges( v, self.edges_set ) ]
adj_types += [ dir2(e,v)*self.edge_weights[e] for e in self.edges_set if self.edges[e] == (v,v) ]
return tuple(sorted(adj_types))
def get_vtcs_coloring( self ):
"""Helper function: Calculate the vertex coloring in a format suitable
for the canonical labeling calculation."""
# All vertices with different numbers of selfloops of different type
# are colored in another way.
dictWeights = { edge : self.edge_weights[e] for e,edge in enumerate(self.edges) }
edge_degree_counter = self.edge_degree_counter(self.edges_set)
selfloop_degree_list = [ (edge_degree_counter[(v,v)],dictWeights[(v,v)] if edge_degree_counter[(v,v)] else 2) for v in self.internal_vtcs_set ]
        # Sorting is important for the vertex order, even when the multiplicities are equal.
selfloop_multiplicity_list = sorted( (mul,v) for v, mul in zip(self.internal_vtcs_set, selfloop_degree_list) )
( ( max_selfloop_multiplicity, _), _ ) = selfloop_multiplicity_list[-1] if selfloop_multiplicity_list else ((0,2), 0)
self_loop_list = [ frozenset( vtx for mul, vtx in filter( lambda ((mul, we), vtx) : mul == i and we == w, selfloop_multiplicity_list ) ) for i in range( max_selfloop_multiplicity+1 ) for w in (1,2,3) ]
# External vertices all have the same color still.
return self_loop_list + [ self.external_vtcs_set ]
def get_edges_coloring( self, edges_set ):
"""Helper function: Calculate the edge coloring in a format suitable
for the canonical labeling calculation."""
# Fermions, bosons and ghosts need different color classes.
fermion_edges_set = self.sub_edges_by_weight(1) & edges_set
boson_edges_set = self.sub_edges_by_weight(2) & edges_set
ghost_edges_set = self.sub_edges_by_weight(3) & edges_set
fermion_edges = frozenset( self.edges[i] for i in fermion_edges_set if not self.is_selfloop(self.edges[i]) )
ghost_edges = frozenset( self.edges[i] for i in ghost_edges_set if not self.is_selfloop(self.edges[i]) )
boson_edges = frozenset( self.edges[i] for i in boson_edges_set )
# Fermions and ghosts need orientation. Bosons not!
# For higher performance some special cases of boson-fermion-ghost
# edge combinations are included.
normalize = lambda edge : (max(edge),min(edge))
flip = lambda (x,y) : (y,x)
fermion_loops = frozenset( normalize(edge) for edge in fermion_edges if flip(edge) in fermion_edges )
ghost_loops = frozenset( normalize(edge) for edge in ghost_edges if flip(edge) in ghost_edges )
reduced_fermion_edges = fermion_edges - fermion_loops - frozenset( flip(edge) for edge in fermion_loops )
reduced_ghost_edges = ghost_edges - ghost_loops - frozenset( flip(edge) for edge in ghost_loops )
boson_fermion_loops = frozenset( edge for edge in reduced_fermion_edges if flip(edge) in boson_edges or edge in boson_edges )
boson_ghost_loops = frozenset( edge for edge in reduced_ghost_edges if flip(edge) in boson_edges or edge in boson_edges )
reduced_boson_edges = boson_edges - boson_fermion_loops - frozenset( flip(edge) for edge in boson_fermion_loops ) - boson_ghost_loops - frozenset( flip(edge) for edge in boson_ghost_loops )
dbl_boson_edges = reduced_boson_edges | frozenset( flip(edge) for edge in reduced_boson_edges )
if len(dbl_boson_edges&reduced_fermion_edges) != 0 or \
len(dbl_boson_edges&reduced_ghost_edges) != 0:
            print dbl_boson_edges, reduced_fermion_edges
            raise ValueError("boson edge coloring classes overlap the fermion/ghost edge classes")
# Calculate the boson coloring as in the Graph class.
boson_coloring = super( WeightedGraph, self).get_edges_coloring( boson_edges_set )
return [ dbl_boson_edges | reduced_fermion_edges | reduced_ghost_edges,
fermion_loops, boson_fermion_loops, ghost_loops, boson_ghost_loops,
reduced_ghost_edges - boson_ghost_loops ] + boson_coloring[1:]
def get_trivial_symmetry_factor( self ):
"""Calculates the trivial factor in the symmetry factor. Only
considers edge multiplicity and self loops."""
grpSize = 1
boson_edges = self.sub_edges_by_weight(2)
edge_degree_counter = self.edge_degree_counter(boson_edges)
for mul_edge_deg in ( m for edge, m in edge_degree_counter.iteritems() if not self.is_selfloop(edge) ):
grpSize*= factorial(mul_edge_deg)
for selfloop_deg in ( m for edge, m in edge_degree_counter.iteritems() if self.is_selfloop(edge) ):
grpSize*= double_factorial(2*selfloop_deg)
return grpSize
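    # Worked example (illustrative; assumes factorial and double_factorial from
    # stuff.py behave as their names suggest): a doubled boson edge between two
    # distinct vertices contributes factorial(2) = 2 and a single boson self-loop
    # contributes double_factorial(2*1) = 2, giving a trivial symmetry factor of 4.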
def permute_external_edges( self ):
"""Generate all possible graphs with fixed external legs from the
graph provided that the graph is non-leg-fixed."""
class FixedGraph( type(self) ):
def get_vtcs_coloring( self ):
vtcs_coloring = super(FixedGraph, self).get_vtcs_coloring()
vtcs_coloring = [ c - self.external_vtcs_set for c in vtcs_coloring]
vtcs_coloring.extend( frozenset([v]) for v in sorted(self.external_vtcs_set) )
return vtcs_coloring
extern_boson_vtcs = \
frozenset( v for e in self.sub_edges_by_weight(2) for v in self.edges[e] ) \
& self.external_vtcs_set
extern_in_fermion_vtcs = \
frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(1) ) \
& self.external_vtcs_set
extern_out_fermion_vtcs = \
frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(1) ) \
& self.external_vtcs_set
extern_in_ghost_vtcs = \
frozenset( self.edges[e][0] for e in self.sub_edges_by_weight(3) ) \
& self.external_vtcs_set
extern_out_ghost_vtcs = \
frozenset( self.edges[e][1] for e in self.sub_edges_by_weight(3) ) \
& self.external_vtcs_set
extern_vtcs_list = list(extern_boson_vtcs) + \
list(extern_in_fermion_vtcs) + \
list(extern_out_fermion_vtcs) + \
list(extern_in_ghost_vtcs) + \
list(extern_out_ghost_vtcs)
        if frozenset(extern_vtcs_list) != self.external_vtcs_set:
            raise ValueError("external vertices derived from edge weights do not match external_vtcs_set")
vtcs_list = list(self.internal_vtcs_set) + \
extern_vtcs_list
for perm0 in itertools.permutations( extern_boson_vtcs ):
for perm1 in itertools.permutations( extern_in_fermion_vtcs ):
for perm2 in itertools.permutations( extern_out_fermion_vtcs ):
for perm3 in itertools.permutations( extern_in_ghost_vtcs ):
for perm4 in itertools.permutations( extern_out_ghost_vtcs ):
new_vtcs_list = tuple(self.internal_vtcs_set) + \
perm0 + perm1 + perm2 + perm3 + perm4
m = dict( zip( vtcs_list, new_vtcs_list ) )
def relabel_edge( (v1,v2) ):
return (m[v1], m[v2])
yield FixedGraph(
[ relabel_edge(edge) for edge in self.edges ], self.edge_weights, 0 )
@property
def clean_graph( self ):
"""Orders the edge- and weight list of the graph in a transparent manner."""
ext_sorter | |
cmdln, **kwargs):
self.cmdln = cmdln
kwargs["prog"] = self.cmdln.name
_OptionParserEx.__init__(self, **kwargs)
self.disable_interspersed_args()
def print_help(self, file=None):
self.cmdln.onecmd(["help"])
def error(self, msg):
raise CmdlnUserError(msg)
class SubCmdOptionParser(_OptionParserEx):
def set_cmdln_info(self, cmdln, subcmd):
"""Called by Cmdln to pass relevant info about itself needed
for print_help().
"""
self.cmdln = cmdln
self.subcmd = subcmd
def print_help(self, file=None):
self.cmdln.onecmd(["help", self.subcmd])
def error(self, msg):
raise CmdlnUserError(msg)
def option(*args, **kwargs):
"""Decorator to add an option to the optparser argument of a Cmdln
subcommand
To add a toplevel option, apply the decorator on the class itself. (see
p4.py for an example)
Example:
@cmdln.option("-E", dest="environment_path")
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#...
"""
def decorate_sub_command(method):
"""create and add sub-command options"""
if not hasattr(method, "optparser"):
method.optparser = SubCmdOptionParser()
method.optparser.add_option(*args, **kwargs)
return method
def decorate_class(klass):
"""store toplevel options"""
assert _forgiving_issubclass(klass, Cmdln)
_inherit_attr(klass, "toplevel_optparser_options", [], cp=lambda l: l[:])
klass.toplevel_optparser_options.append( (args, kwargs) )
return klass
#XXX Is there a possible optimization for many options to not have a
# large stack depth here?
def decorate(obj):
if _forgiving_issubclass(obj, Cmdln):
return decorate_class(obj)
else:
return decorate_sub_command(obj)
return decorate
class Cmdln(RawCmdln):
"""An improved (on cmd.Cmd) framework for building multi-subcommand
scripts (think "svn" & "cvs") and simple shells (think "pdb" and
"gdb").
A simple example:
import cmdln
class MySVN(cmdln.Cmdln):
name = "svn"
@cmdln.aliases('stat', 'st')
@cmdln.option('-v', '--verbose', action='store_true'
help='print verbose information')
def do_status(self, subcmd, opts, *paths):
print "handle 'svn status' command"
#...
if __name__ == "__main__":
shell = MySVN()
retval = shell.main()
sys.exit(retval)
'Cmdln' extends 'RawCmdln' by providing optparse option processing
integration. See this class' _dispatch_cmd() docstring and general
cmdln document for more information.
"""
def _dispatch_cmd(self, handler, argv):
"""Introspect sub-command handler signature to determine how to
dispatch the command. The raw handler provided by the base
'RawCmdln' class is still supported:
def do_foo(self, argv):
# 'argv' is the vector of command line args, argv[0] is
# the command name itself (i.e. "foo" or an alias)
pass
In addition, if the handler has more than 2 arguments option
processing is automatically done (using optparse):
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar(self, subcmd, opts, *args):
# subcmd = <"bar" or an alias>
# opts = <an optparse.Values instance>
if opts.verbose:
print "lots of debugging output..."
# args = <tuple of arguments>
for arg in args:
bar(arg)
TODO: explain that "*args" can be other signatures as well.
The `cmdln.option` decorator corresponds to an `add_option()`
method call on an `optparse.OptionParser` instance.
You can declare a specific number of arguments:
@cmdln.option('-v', '--verbose', action='store_true')
def do_bar2(self, subcmd, opts, bar_one, bar_two):
#...
and an appropriate error message will be raised/printed if the
command is called with a different number of args.
"""
co_argcount = handler.__func__.__code__.co_argcount
if co_argcount == 2: # handler ::= do_foo(self, argv)
return handler(argv)
elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
try:
optparser = handler.optparser
except AttributeError:
optparser = handler.__func__.optparser = SubCmdOptionParser()
assert isinstance(optparser, SubCmdOptionParser)
# apply subcommand options' defaults from config files, if any.
subcmd = handler.__name__.split('do_', 1)[1]
optparser.set_defaults(**self.get_option_defaults(subcmd))
optparser.set_cmdln_info(self, argv[0])
try:
opts, args = optparser.parse_args(argv[1:])
except StopOptionProcessing:
#TODO: this doesn't really fly for a replacement of
# optparse.py behaviour, does it?
return 0 # Normal command termination
try:
return handler(argv[0], opts, *args)
except TypeError:
_, ex, _ = sys.exc_info()
# Some TypeError's are user errors:
# do_foo() takes at least 4 arguments (3 given)
# do_foo() takes at most 5 arguments (6 given)
# do_foo() takes exactly 5 arguments (6 given)
# do_foo() takes exactly 5 positional arguments (6 given)
# Raise CmdlnUserError for these with a suitably
# massaged error message.
tb = sys.exc_info()[2] # the traceback object
if tb.tb_next is not None:
                    # If the traceback is more than one level deep, then the
                    # TypeError did *not* happen on the "handler(...)" call
                    # above. In that case we don't want to handle it specially
                    # here: it would falsely mask deeper code errors.
raise
msg = ex.args[0]
match = _INCORRECT_NUM_ARGS_RE.search(msg)
if match:
msg = list(match.groups())
msg[1] = int(msg[1]) - 3
if msg[1] == 1:
msg[2] = msg[2].replace("arguments", "argument")
msg[3] = int(msg[3]) - 3
msg = ''.join(map(str, msg))
raise CmdlnUserError(msg)
else:
raise
else:
raise CmdlnError("incorrect argcount for %s(): takes %d, must "
"take 2 for 'argv' signature or 3+ for 'opts' "
"signature" % (handler.__name__, co_argcount))
#---- support for generating `man` page output from a Cmdln class
def man_sections_from_cmdln(inst, summary=None, description=None, author=None):
"""Return man page sections appropriate for the given Cmdln instance.
Join these sections for man page content.
The man page sections generated are:
NAME
SYNOPSIS
DESCRIPTION (if `description` is given)
OPTIONS
COMMANDS
HELP TOPICS (if any)
@param inst {Cmdln} Instance of Cmdln subclass for which to generate
man page content.
@param summary {str} A one-liner summary of the command.
@param description {str} A description of the command. If given,
it will be used for a "DESCRIPTION" section.
    @param author {str} The author name and email for the AUTHOR section
of the man page.
@raises {ValueError} if man page content cannot be generated for the
given class.
"""
if not inst.__class__.name:
raise ValueError("cannot generate man page content: `name` is not "
"set on class %r" % inst.__class__)
data = {
"name": inst.name,
"ucname": inst.name.upper(),
"date": datetime.date.today().strftime("%b %Y"),
"cmdln_version": __version__,
"version_str": inst.version and " %s" % inst.version or "",
"summary_str": summary and r" \- %s" % summary or "",
}
sections = []
sections.append('.\\" Automatically generated by cmdln %(cmdln_version)s\n'
'.TH %(ucname)s "1" "%(date)s" "%(name)s%(version_str)s" "User Commands"\n'
% data)
sections.append(".SH NAME\n%(name)s%(summary_str)s\n" % data)
sections.append(_dedent(r"""
.SH SYNOPSIS
.B %(name)s
[\fIGLOBALOPTS\fR] \fISUBCOMMAND \fR[\fIOPTS\fR] [\fIARGS\fR...]
.br
.B %(name)s
\fIhelp SUBCOMMAND\fR
""") % data)
if description:
sections.append(".SH DESCRIPTION\n%s\n" % description)
section = ".SH OPTIONS\n"
    if getattr(inst, "optparser", None) is None:
#HACK: In case `.main()` hasn't been run.
inst.optparser = inst.get_optparser()
lines = inst._help_preprocess("${option_list}", None).splitlines(False)
for line in lines[1:]:
line = line.lstrip()
if not line:
continue
section += ".TP\n"
opts, desc = line.split(' ', 1)
section += ".B %s\n" % opts
section += "%s\n" % _dedent(desc.lstrip(), skip_first_line=True)
sections.append(section)
section = ".SH COMMANDS\n"
cmds = inst._get_cmds_data()
for cmdstr, doc in cmds:
cmdname = cmdstr.split(' ')[0] # e.g. "commit (ci)" -> "commit"
doc = inst._help_reindent(doc, indent="")
doc = inst._help_preprocess(doc, cmdname)
doc = doc.rstrip() + "\n" # trim down trailing space
section += '.PP\n.SS %s\n%s\n' % (cmdstr, doc)
sections.append(section)
help_names = inst._get_help_names()
if help_names:
section = ".SH HELP TOPICS\n"
for help_name, help_meth in sorted(help_names.items()):
help = help_meth(inst)
help = inst._help_reindent(help, indent="")
section += '.PP\n.SS %s\n%s\n' % (help_name, help)
sections.append(section)
if author:
sections.append(".SH AUTHOR\n%s\n" % author)
return sections
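# Minimal usage sketch (MyTool is a hypothetical Cmdln subclass; names and the
# output path are placeholders):
#
#     tool = MyTool()
#     sections = man_sections_from_cmdln(
#         tool, summary="frobnicate things",
#         description="Longer description of the tool.",
#         author="Jane Doe <jane@example.com>")
#     with open("mytool.1", "w") as f:
#         f.write("".join(sections))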
#---- internal support functions
def _inherit_attr(klass, attr, default, cp):
"""Inherit the attribute from the base class
Copy `attr` from base class (otherwise use `default`). Copying is done using
the passed `cp` function.
The motivation behind writing this function is to allow inheritance among
Cmdln classes where base classes set 'common' options using the
`@cmdln.option` decorator. To ensure this, we must not write to the base
class's options when handling the derived class.
"""
if attr not in klass.__dict__:
if hasattr(klass, attr):
value = cp(getattr(klass, attr))
else:
value = default
setattr(klass, attr, value)
def _forgiving_issubclass(derived_class, base_class):
"""Forgiving version of ``issubclass``
Does not throw any exception when arguments are not of class type
"""
return (type(derived_class) is ClassType and \
type(base_class) is ClassType and \
issubclass(derived_class, base_class))
def _format_linedata(linedata, indent, indent_width):
"""Format specific linedata into a pleasant layout.
"linedata" is a list of 2-tuples of the form:
(<item-display-string>, <item-docstring>)
"indent" is a string to use for one level of indentation
"indent_width" is a number of columns by which the
formatted data will be indented when printed.
The <item-display-string> column is held to 30 columns.
"""
lines = []
WIDTH = 78 - indent_width
SPACING = 2
NAME_WIDTH_LOWER_BOUND = 13
NAME_WIDTH_UPPER_BOUND = 30
NAME_WIDTH = max([len(s) for s,d in linedata])
if NAME_WIDTH < NAME_WIDTH_LOWER_BOUND:
NAME_WIDTH = NAME_WIDTH_LOWER_BOUND
elif NAME_WIDTH > NAME_WIDTH_UPPER_BOUND:
NAME_WIDTH = NAME_WIDTH_UPPER_BOUND
DOC_WIDTH = WIDTH - NAME_WIDTH | |
<filename>code/utils/data.py
"""
Created on Sun Mar 3 21:42:16 2019
@author: <NAME>
"""
import os
import sys
import gdal
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from imblearn.under_sampling import RandomUnderSampler, TomekLinks, ClusterCentroids, EditedNearestNeighbours, RepeatedEditedNearestNeighbours, AllKNN, CondensedNearestNeighbour, OneSidedSelection, NeighbourhoodCleaningRule, InstanceHardnessThreshold, NearMiss
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.combine import SMOTETomek
from imblearn.ensemble import RUSBoostClassifier
from scipy import stats
import scipy.signal
from collections import Counter
from tqdm import tqdm
# inicialize data location
DATA_FOLDER = "../sensing_data/"
ROI = "bigsquare/"
DS_FOLDER = DATA_FOLDER + "clipped/" + ROI
TS_FOLDER = DS_FOLDER + "tstats/"
TS1_FOLDER = DS_FOLDER + "t1stats/"
STATIC_FOLDER = DS_FOLDER + "static/"
CACHE_FOLDER = DS_FOLDER + "cache/"
# Class to text for plotting features
def feature_map(u):
src_dss = [f for f in os.listdir(DS_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
ts_dss = [f for f in os.listdir(TS_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
ts1_dss = [f for f in os.listdir(TS1_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
src_dss = src_dss + ts_dss + ts1_dss
src_dss.sort()
text_classes = dict(zip(range(len(src_dss)), src_dss))
return np.array([text_classes[x] for x in u])
# Class to text for plotting and analysis, only works if map_classes = True
def reverse_road_class_map(u):
text_classes = {
1: "Edificação artificial permanente",
2: "Estradas",
3: "Natural",
4: "Água",
}
return np.array([text_classes[x] for x in u])
# Class to text for plotting and analysis, only works if map_classes = True
def reverse_class_map(u):
text_classes = {
1: "Edificação artificial permanente",
2: "Natural",
3: "Água",
}
return np.array([text_classes[x] for x in u])
def _army_map(X):
return X
def _class_map(x):
if x >= 1 and x <= 13:
return 1
elif x > 13 and x <= 42:
return 2
elif x > 42 and x <= 48:
return 3
return 2
def _class_map_ua(x):
if x >= 1 and x <= 7:
#built up
return 0
return 1
def _class_split_map_ua(x):
if x >= 1 and x <= 3:
#dense fabric
return 1
if x > 3 and x <= 6:
#less dense fabric
return 2
elif (x > 6 and x <= 14) or (x == 17):
# roads and other structures 17 sport
return 3
elif x > 25 and x <= 27:
# water bodies
return 5
return 4 # non built up
def _class_split_map_ua_binary(x):
if x >= 1 and x <= 3:
#urban fabric
return 1
return 0 # non built up
def _class_split_map(x):
if x == 1:
return 1
if x == 2:
return 2
if x >= 3 and x <= 13:
return 3
elif x > 13 and x <= 42:
return 4
elif x > 42 and x <= 48:
return 5
return 4
def _road_and_map(x):
if x == 4:
return 2
if x >= 1 and x <= 13:
return 1
elif x > 13 and x <= 42:
return 3
elif x > 42 and x <= 48:
return 4
return 3
def _road_map(x): # roads vs all
if x == 4:
return 1
elif x > 42 and x <= 48:
return 3
return 2
def _class_map_binary(x):
if (x >= 1 and x <= 3) or x == 5:
return 1
return 0
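# Minimal sanity-check sketch for the COS class mappings above; the thresholds
# follow _class_map/_class_map_binary and the labels follow reverse_class_map
# (1 = built-up, 2 = natural, 3 = water).
def _example_class_mapping():
    """Illustrative check of the mapping helpers."""
    assert _class_map(5) == 1 and _class_map(20) == 2 and _class_map(45) == 3
    assert _class_map_binary(2) == 1 and _class_map_binary(20) == 0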
def get_features():
src_dss = [f for f in os.listdir(DS_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
ts_dss = [f for f in os.listdir(TS_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
ts1_dss = [f for f in os.listdir(TS1_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
src_dss = src_dss + ts_dss + ts1_dss
src_dss.sort()
return np.array(src_dss)
def load_prediction(datafiles=None, ratio=1, znorm=False, normalize=False, map_classes=False, urban_atlas=False, binary=False, osm_roads=False, army_gt=False, split_struct=False, gt_raster="cos_ground.tiff"):
print("Prediction data: Loading...")
if(datafiles is None):
src_dss = [DS_FOLDER + f for f in os.listdir(DS_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f) and ("decis" not in f)]
ts_dss = [TS_FOLDER + f for f in os.listdir(TS_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f)]
ts1_dss = [TS1_FOLDER + f for f in os.listdir(TS1_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f)]
src_dss = src_dss + ts_dss + ts1_dss
else:
src_dss = datafiles
src_dss.sort()
print("SRC Images: ", src_dss)
refDs = gdal.Open(gt_raster, gdal.GA_ReadOnly)
band = refDs.GetRasterBand(1).ReadAsArray()
shape = tuple([int(ratio*i) for i in band.shape])
try:
print("Trying to load cached data...")
X = np.load(CACHE_FOLDER + "pred_data.npy")
print("Using cached data...")
except Exception:
print("Failed to load cached data...")
print("Reconstructing data...")
X = []
print("Datasets: Loading...")
for raster in tqdm(src_dss):
# Open raster dataset
raster_ds = gdal.Open(raster, gdal.GA_ReadOnly)
n_bands = raster_ds.RasterCount
# Extract band's data and transform into a numpy array
for band in range(1, n_bands+1):
test_ds = raster_ds.GetRasterBand(band).ReadAsArray()
test_ds = test_ds[:shape[0], :shape[1]]
X.append(test_ds.flatten())
print("Transposing data...")
# Transpose attributes matrix
X = np.dstack(tuple(X))[0]
X = X.astype(np.float32)
X[~np.isfinite(X)] = -1
print("Saving data to file cache...")
np.save(CACHE_FOLDER + "pred_data.npy", X)
if normalize:
X = normalize.transform(X)
elif znorm:
print("Z-Normalization: Loading...")
X = stats.zscore(X, axis=1)
print("Done!")
labelDS = gdal.Open(gt_raster, gdal.GA_ReadOnly)
y = labelDS.GetRasterBand(1)
y = y.ReadAsArray()[:shape[0], :shape[1]].flatten()
maping_f = _class_map
if binary:
maping_f = _class_map_binary
if osm_roads:
labelDS = gdal.Open(
DS_FOLDER + "roads_cos_50982.tiff", gdal.GA_ReadOnly)
roads = labelDS.GetRasterBand(1).ReadAsArray()[
:shape[0], :shape[1]].flatten()
y[roads == 4] = roads[roads == 4]
maping_f = _road_and_map
if split_struct:
maping_f = _class_split_map
if army_gt:
maping_f = _army_map
if urban_atlas:
maping_f = _class_split_map_ua
print("Class Mapping: UA2018...")
if map_classes:
y = np.array([maping_f(yi) for yi in tqdm(y)])
print("Prediction data: Done!")
return X, y, shape
def load_timeseries(img_size):
ts_dss = [TS_FOLDER + f for f in os.listdir(TS_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
ts1_dss = [TS1_FOLDER + f for f in os.listdir(TS1_FOLDER) if (
"cos" not in f) and ("xml" not in f) and ("_" in f)]
image_files = ts_dss + ts1_dss # Your list of files
image_height = img_size[0]
image_width = img_size[1]
# Create empty HxWxN array/matrix
image_stack = np.empty((image_height, image_width, len(image_files)))
for i, fname in enumerate(image_files):
# Extract band's data and transform into a numpy array
label_ds = gdal.Open(fname, gdal.GA_ReadOnly)
label_bands = label_ds.GetRasterBand(1).ReadAsArray()
image_stack[:, :, i] = label_bands # Set the i:th slice to this image
    return image_stack
def load(datafiles=None, normalize=False, znorm=False, map_classes=False, binary=False, test_size=0.2, osm_roads=False, army_gt=False, urban_atlas=False, split_struct=False, indexes=False, gt_raster="cos_ground.tif"):
try:
print("Trying to load cached data...")
X_train = np.load(CACHE_FOLDER + "train_data.npy")
y_train = np.load(CACHE_FOLDER + "train_labels.npy")
X_test = np.load(CACHE_FOLDER + "test_data.npy")
y_test = np.load(CACHE_FOLDER + "test_labels.npy")
X_val = np.load(CACHE_FOLDER + "val_data.npy")
y_val = np.load(CACHE_FOLDER + "val_labels.npy")
print("Using cached data...", X_train.shape)
normalizer = None
except Exception:
print("Failed to load cached data...")
print("Reconstructing data...")
if(datafiles is None):
src_dss = [DS_FOLDER + f for f in os.listdir(DS_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f) and ("decis" not in f)]
ts_dss = [TS_FOLDER + f for f in os.listdir(TS_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f)]
ts1_dss = [TS1_FOLDER + f for f in os.listdir(TS1_FOLDER) if (
'ground' not in f) and ("xml" not in f) and ("_" in f)]
src_dss = src_dss + ts_dss + ts1_dss
else:
src_dss = datafiles
src_dss.sort()
print("SRC Images: ", src_dss)
gt_ds = gdal.Open(
DS_FOLDER + gt_raster, gdal.GA_ReadOnly)
gt_bands = gt_ds.GetRasterBand(1)
gt_bands = gt_bands.ReadAsArray()[:, :]
ref_ds = gdal.Open(
DS_FOLDER + gt_raster, gdal.GA_ReadOnly)
ref_bands = ref_ds.GetRasterBand(1)
ref_bands = ref_bands.ReadAsArray()[:, :]
(unique, counts) = np.unique(gt_bands, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print("Pre zero frequencies")
print(frequencies)
is_train = np.nonzero(ref_bands)
print("Pixels: ", gt_bands.size)
y = gt_bands[is_train].flatten()
(unique, counts) = np.unique( y, return_counts=True)
frequencies = np.asarray((unique, counts)).T
print("Real frequencies")
print(frequencies)
# Prepare training data (set of pixels used for training) and labels
# Create empty HxW array/matrix
# X = np.empty((len(src_dss), len(cos_bands[is_train])))
X = []
# Get list of raster bands info as array, already indexed by labels non zero
print("Datasets: | |
enabled
C{Status}.
"""
query_factory = mock_query_factory(payload.sample_s3_get_bucket_versioning_enabled_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?versioning"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_results(versioning_config):
self.assertEquals(versioning_config.status, 'Enabled')
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_bucket_versioning_config("mybucket")
d.addCallback(check_query_args)
d.addCallback(check_results)
return d
def test_get_bucket_versioning_config_mfa_disabled(self):
"""
L{S3Client.get_bucket_versioning_config} creates a L{Query} to get a
bucket's versioning configuration. It parses the returned
C{VersioningConfiguration} XML document and returns a C{Deferred} that
requests the bucket's versioning configuration that has a disabled
C{MfaDelete}.
"""
query_factory = mock_query_factory(payload.sample_s3_get_bucket_versioning_mfa_disabled_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?versioning"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_results(versioning_config):
self.assertEquals(versioning_config.mfa_delete, 'Disabled')
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_bucket_versioning_config("mybucket")
d.addCallback(check_query_args)
d.addCallback(check_results)
return d
def test_delete_bucket(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"DELETE",
url_context=client.s3_url_context(self.endpoint, "mybucket"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.delete_bucket("mybucket")
d.addCallback(check_query_args)
return d
def test_put_bucket_acl(self):
query_factory = mock_query_factory(payload.sample_access_control_policy_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?acl"),
content_sha256=sha256(
payload.sample_access_control_policy_result
).hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
def check_result(result):
self.assertIsInstance(result, AccessControlPolicy)
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
policy = AccessControlPolicy.from_xml(
payload.sample_access_control_policy_result)
d = s3.put_bucket_acl("mybucket", policy)
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
def test_get_bucket_acl(self):
query_factory = mock_query_factory(payload.sample_access_control_policy_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?acl"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_result(result):
self.assert_(isinstance(result, AccessControlPolicy))
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_bucket_acl("mybucket")
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
def test_put_request_payment(self):
"""
L{S3Client.put_request_payment} creates a L{Query} to set payment
information. An C{RequestPaymentConfiguration} XML document is built
and sent to the endpoint and a C{Deferred} is returned that fires with
the results of the request.
"""
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
xml = ("<RequestPaymentConfiguration "
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">\n'
" <Payer>Requester</Payer>\n"
"</RequestPaymentConfiguration>")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?requestPayment"),
content_sha256=sha256(xml).hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.put_request_payment("mybucket", "Requester")
d.addCallback(check_query_args)
return d
def test_get_request_payment(self):
"""
L{S3Client.get_request_payment} creates a L{Query} to get payment
information. It parses the returned C{RequestPaymentConfiguration}
XML document and returns a C{Deferred} that fires with the payer's
name.
"""
query_factory = mock_query_factory(payload.sample_request_payment)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "?requestPayment"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_request_payment(result):
self.assertEquals(result, "Requester")
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_request_payment("mybucket")
d.addCallback(check_query_args)
d.addCallback(check_request_payment)
return d
def test_put_object(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "mybucket", "objectname"),
headers=Headers({"content-type": ["text/plain"]}),
metadata={"key": "some meta data"},
amz_headers={
"acl": "public-read",
},
content_sha256=sha256(b"some data").hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.put_object(
"mybucket", "objectname", "some data",
content_type="text/plain",
metadata={"key": "some meta data"},
amz_headers={"acl": "public-read"},
)
d.addCallback(check_query_args)
return d
def test_put_object_with_custom_body_producer(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "mybucket", "objectname"),
headers=Headers({"content-type": ["text/plain"]}),
metadata={"key": "some meta data"},
amz_headers={
"acl": "public-read",
},
body_producer=string_producer,
),
query_factory.details,
)
string_producer = StringBodyProducer("some data")
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.put_object(
"mybucket", "objectname",
content_type="text/plain",
metadata={"key": "some meta data"},
amz_headers={"acl": "public-read"},
body_producer=string_producer,
)
d.addCallback(check_query_args)
return d
def test_copy_object(self):
"""
L{S3Client.copy_object} creates a L{Query} to copy an object from one
bucket to another.
"""
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "newbucket", "newobjectname"),
metadata={"key": "some meta data"},
amz_headers={
"copy-source": "/mybucket/objectname",
},
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.copy_object(
"mybucket", "objectname", "newbucket",
"newobjectname",
metadata={"key": "some meta data"},
)
d.addCallback(check_query_args)
return d
def test_get_object(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "objectname"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_object("mybucket", "objectname")
d.addCallback(check_query_args)
return d
def test_head_object(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"HEAD",
url_context=client.s3_url_context(self.endpoint, "mybucket", "objectname"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.head_object("mybucket", "objectname")
d.addCallback(check_query_args)
return d
def test_delete_object(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"DELETE",
url_context=client.s3_url_context(self.endpoint, "mybucket", "objectname"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.delete_object("mybucket", "objectname")
d.addCallback(check_query_args)
return d
def test_put_object_acl(self):
query_factory = mock_query_factory(payload.sample_access_control_policy_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(self.endpoint, "mybucket", "myobject?acl"),
content_sha256=sha256(
payload.sample_access_control_policy_result
).hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
def check_result(result):
self.assertIsInstance(result, AccessControlPolicy)
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
policy = AccessControlPolicy.from_xml(
payload.sample_access_control_policy_result)
d = s3.put_object_acl("mybucket", "myobject", policy)
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
def test_get_object_acl(self):
query_factory = mock_query_factory(payload.sample_access_control_policy_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"GET",
url_context=client.s3_url_context(self.endpoint, "mybucket", "myobject?acl"),
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_result(result):
self.assertIsInstance(result, AccessControlPolicy)
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.get_object_acl("mybucket", "myobject")
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
def test_init_multipart_upload(self):
query_factory = mock_query_factory(payload.sample_s3_init_multipart_upload_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"POST",
url_context=client.s3_url_context(
self.endpoint, "example-bucket", "example-object?uploads",
),
amz_headers={
"acl": "public",
},
content_sha256=EMPTY_CONTENT_SHA256,
),
query_factory.details,
)
return passthrough
def check_result(result):
self.assert_(isinstance(result, MultipartInitiationResponse))
self.assertEqual(result.bucket, "example-bucket")
self.assertEqual(result.object_name, "example-object")
self.assertEqual(result.upload_id, "deadbeef")
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.init_multipart_upload("example-bucket", "example-object",
amz_headers={"acl": "public"})
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
def test_upload_part(self):
query_factory = mock_query_factory(None)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"PUT",
url_context=client.s3_url_context(
self.endpoint, "example-bucket", "example-object?partNumber=3&uploadId=testid"
),
content_sha256=sha256(b"some data").hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.upload_part(
"example-bucket", "example-object", "testid", 3, "some data",
)
d.addCallback(check_query_args)
return d
def test_complete_multipart_upload(self):
query_factory = mock_query_factory(payload.sample_s3_complete_multipart_upload_result)
def check_query_args(passthrough):
self.assertEqual(query_factory.credentials.access_key, "foo")
self.assertEqual(query_factory.credentials.secret_key, "bar")
xml = (
"<CompleteMultipartUpload>\n"
"<Part>\n<PartNumber>1</PartNumber>\n<ETag>a</ETag>\n"
"</Part>\n<Part>\n<PartNumber>2</PartNumber>\n"
"<ETag>b</ETag>\n</Part>\n</CompleteMultipartUpload>"
)
self.assertEqual(
RequestDetails(
service=b"s3",
region=REGION_US_EAST_1,
method=b"POST",
url_context=client.s3_url_context(
self.endpoint, "example-bucket", "example-object?uploadId=testid"
),
content_sha256=sha256(xml).hexdigest().decode("ascii"),
),
assoc(query_factory.details, body_producer=None),
)
return passthrough
def check_result(result):
self.assert_(isinstance(result, MultipartCompletionResponse))
self.assertEqual(result.bucket, "example-bucket")
self.assertEqual(result.object_name, "example-object")
self.assertEqual(result.location,
"http://example-bucket.s3.amazonaws.com/example-object")
self.assertEqual(result.etag,
'"3858f62230ac3c915f300c664312c11f-9"')
creds = AWSCredentials("foo", "bar")
s3 = client.S3Client(creds, query_factory=query_factory)
d = s3.complete_multipart_upload(
"example-bucket",
"example-object",
"testid", [(1, "a"), (2, "b")]
)
d.addCallback(check_query_args)
d.addCallback(check_result)
return d
class QueryTestCase(TestCase):
creds = AWSCredentials(access_key="fookeyid", secret_key="barsecretkey")
endpoint = AWSServiceEndpoint("https://choopy.s3.amazonaws.com/")
utc_instant = datetime.datetime(2015, 8, 30, 12, 36)
def fake_sign(self, headers, data, url_context, instant, method):
return "Authorization header"
def test_default_creation(self):
query = client.Query(action="PUT")
self.assertEquals(query.bucket, None)
self.assertEquals(query.object_name, None)
self.assertEquals(query.data, "")
self.assertEquals(query.content_type, None)
self.assertEquals(query.metadata, {})
def test_default_endpoint(self):
query = client.Query(action="PUT")
self.assertEquals(self.endpoint.host, "choopy.s3.amazonaws.com")
self.assertEquals(query.endpoint.host, "s3.amazonaws.com")
self.assertEquals(self.endpoint.method, "GET")
self.assertEquals(query.endpoint.method, "PUT")
def test_set_content_type_no_object_name(self):
query = client.Query(action="PUT")
query.set_content_type()
self.assertEquals(query.content_type, None)
def test_set_content_type(self):
query = client.Query(action="PUT", object_name="advicedog.jpg")
query.set_content_type()
self.assertEquals(query.content_type, "image/jpeg")
def test_set_content_type_with_content_type_already_set(self):
query = client.Query(
action="PUT", object_name="data.txt", content_type="text/csv")
query.set_content_type()
self.assertNotEquals(query.content_type, "text/plain")
self.assertEquals(query.content_type, "text/csv")
def test_get_headers(self):
query = client.Query(
action="GET", creds=self.creds, bucket="mystuff",
object_name="/images/thing.jpg")
headers = query.get_headers(self.utc_instant)
self.assertEquals(headers.get("Content-Type"), "image/jpeg")
self.assertEquals(
headers.get("x-amz-content-sha256"),
sha256(b"").hexdigest(),
)
self.assertEqual(headers.get("x-amz-date"), "20150830T123600Z")
self.assertTrue(
headers.get("Authorization").startswith("AWS4-HMAC-SHA256"))
self.assertTrue(len(headers.get("Authorization")) > 40)
def test_get_headers_with_data(self):
query = client.Query(
action="PUT", creds=self.creds, bucket="mystuff",
object_name="/images/thing.jpg", data="BINARY IMAGE DATA")
headers = query.get_headers(self.utc_instant)
self.assertEquals(headers.get("Content-Type"), "image/jpeg")
self.assertEqual(headers.get("x-amz-date"), "20150830T123600Z")
self.assertTrue(
headers.get("Authorization").startswith("AWS4-HMAC-SHA256"))
self.assertTrue(len(headers.get("Authorization")) > 40)
def test_sign(self):
query = client.Query(action="PUT", creds=self.creds, data="data")
signed = query.sign(headers={"x-amz-date": "20150830T123600Z"},
data="some data",
url_context=client.URLContext(query.endpoint,
query.bucket,
query.object_name),
instant=self.utc_instant,
method=query.action)
self.assertEquals(
signed,
'AWS4-HMAC-SHA256 '
'Credential=fookeyid/20150830/us-east-1/s3/aws4_request, '
'SignedHeaders=host;x-amz-date, '
'Signature=99e8224887926c76e8e3053cf10f26249798fe2274d717b7d28e6ef'
'3311d1735')
def test_object_query(self):
"""
Test that a request addressing an object is created correctly.
"""
DATA = b"objectData"
DIGEST = sha256(DATA).hexdigest()
request = client.Query(
action="PUT", bucket="somebucket", object_name="object/name/here",
data=DATA, content_type="text/plain", metadata={"foo": "bar"},
amz_headers={"acl": "public-read"}, creds=self.creds,
endpoint=self.endpoint)
request.sign = self.fake_sign
self.assertEqual(request.action, "PUT")
headers = request.get_headers(self.utc_instant)
self.assertNotEqual(headers.pop("x-amz-date"), "")
self.assertEqual(headers,
{"Authorization": "Authorization header",
"Content-Type": "text/plain",
"x-amz-content-sha256": DIGEST,
"x-amz-meta-foo": "bar",
"x-amz-acl": "public-read"})
self.assertEqual(request.data, "objectData")
def test_bucket_query(self):
"""
Test that a request addressing a bucket is created correctly.
"""
DIGEST = ("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b785"
"2b855")
query = client.Query(
action="GET", bucket="somebucket", creds=self.creds,
endpoint=self.endpoint)
query.sign = self.fake_sign
self.assertEqual(query.action, "GET")
headers = query.get_headers(self.utc_instant)
self.assertNotEqual(headers.pop("x-amz-date"), "")
self.assertEqual(
headers, {
"Authorization": "Authorization header",
"x-amz-content-sha256": DIGEST})
self.assertEqual(query.data, "")
def test_submit(self):
"""
Submitting the request should invoke getPage correctly.
"""
class | |
nan, 2700, 900, nan, 0.73, nan ],
[ nan, 3000, 1000, nan, 0.99, nan ],
[ nan, 6000, 2000, nan, 4.82, nan ],
[ nan, 9000, 3000, nan, 12.88, nan ],
[ nan, 12000, 4000, nan, 27.43, nan ],
[ nan, 15000, 5000, nan, 46.47, nan ],
[ nan, 18000, 6000, nan, 78.11, nan ],
[ nan, 21000, 7000, nan, 117.80, nan ],
[ nan, 24000, 8000, nan, 158.32, nan ],
[ nan, 27000, 9000, nan, 228.24, nan ],
[ nan, 100, 300, nan, 0.02, nan ],
[ nan, 200, 600, nan, 0.08, nan ],
[ nan, 300, 900, nan, 0.09, nan ],
[ nan, 400, 1200, nan, 0.16, nan ],
[ nan, 500, 1500, nan, 0.25, nan ],
[ nan, 600, 1800, nan, 0.39, nan ],
[ nan, 700, 2100, nan, 0.52, nan ],
[ nan, 800, 2400, nan, 0.68, nan ],
[ nan, 900, 2700, nan, 0.83, nan ],
[ nan, 1000, 3000, nan, 1.11, nan ],
[ nan, 2000, 6000, nan, 6.02, nan ],
[ nan, 3000, 9000, nan, 15.63, nan ],
[ nan, 4000, 12000, nan, 31.95, nan ],
[ nan, 5000, 15000, nan, 55.06, nan ],
[ nan, 6000, 18000, nan, 101.32, nan ],
[ nan, 7000, 21000, nan, 139.57, nan ],
[ nan, 8000, 24000, nan, 272.79, nan ],
[ nan, 9000, 27000, nan, 289.58, nan ],
[ nan, 10000, 100, nan, 0.07, nan ],
[ nan, 20000, 200, nan, 0.29, nan ],
[ nan, 30000, 300, nan, 0.47, nan ],
[ nan, 40000, 400, nan, 1.23, nan ],
[ nan, 50000, 500, nan, 1.79, nan ],
[ nan, 60000, 600, nan, 2.65, nan ],
[ nan, 70000, 700, nan, 3.98, nan ],
[ nan, 80000, 800, nan, 5.43, nan ],
[ nan, 90000, 900, nan, 8.07, nan ],
[ nan, 100000, 1000, nan, 10.53, nan ],
[ nan, 200000, 2000, nan, 72.33, nan ],
[ nan, 100, 10000, nan, 0.06, nan ],
[ nan, 200, 20000, nan, 0.39, nan ],
[ nan, 300, 30000, nan, 0.62, nan ],
[ nan, 400, 40000, nan, 1.17, nan ],
[ nan, 500, 50000, nan, 3.24, nan ],
[ nan, 600, 60000, nan, 4.11, nan ],
[ nan, 700, 70000, nan, 5.45, nan ],
[ nan, 800, 80000, nan, 7.34, nan ],
[ nan, 900, 90000, nan, 8.96, nan ],
[ nan, 1000, 100000, nan, 14.89, nan ],
[ nan, 2000, 200000, nan, 100.00, nan ],
])
# numactl --interleave=all ./testing_dgesdd -UN -VN -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000 -N 300,100 -N 600,200 -N 900,300 -N 1200,400 -N 1500,500 -N 1800,600 -N 2100,700 -N 2400,800 -N 2700,900 -N 3000,1000 -N 6000,2000 -N 9000,3000 -N 12000,4000 -N 15000,5000 -N 18000,6000 -N 21000,7000 -N 24000,8000 -N 27000,9000 -N 100,300 -N 200,600 -N 300,900 -N 400,1200 -N 500,1500 -N 600,1800 -N 700,2100 -N 800,2400 -N 900,2700 -N 1000,3000 -N 2000,6000 -N 3000,9000 -N 4000,12000 -N 5000,15000 -N 6000,18000 -N 7000,21000 -N 8000,24000 -N 9000,27000 -N 10000,100 -N 20000,200 -N 30000,300 -N 40000,400 -N 50000,500 -N 60000,600 -N 70000,700 -N 80000,800 -N 90000,900 -N 100000,1000 -N 200000,2000 -N 100,10000 -N 200,20000 -N 300,30000 -N 400,40000 -N 500,50000 -N 600,60000 -N 700,70000 -N 800,80000 -N 900,90000 -N 1000,100000 -N 2000,200000
dgesdd_UN = array([
[ nan, 10, 10, nan, 0.00, nan ],
[ nan, 20, 20, nan, 0.00, nan ],
[ nan, 30, 30, nan, 0.00, nan ],
[ nan, 40, 40, nan, 0.00, nan ],
[ nan, 50, 50, nan, 0.00, nan ],
[ nan, 60, 60, nan, 0.00, nan ],
[ nan, 70, 70, nan, 0.00, nan ],
[ nan, 80, 80, nan, 0.00, nan ],
[ nan, 90, 90, nan, 0.00, nan ],
[ nan, 100, 100, nan, 0.00, nan ],
[ nan, 200, 200, nan, 0.01, nan ],
[ nan, 300, 300, nan, 0.03, nan ],
[ nan, 400, 400, nan, 0.05, nan ],
[ nan, 500, 500, nan, 0.07, nan ],
[ nan, 600, 600, nan, 0.09, nan ],
[ nan, 700, 700, nan, 0.12, nan ],
[ nan, 800, 800, nan, 0.15, nan ],
[ nan, 900, 900, nan, 0.19, nan ],
[ nan, 1000, 1000, nan, 0.23, nan ],
[ nan, 2000, 2000, nan, 0.91, nan ],
[ nan, 3000, 3000, nan, 2.35, nan ],
[ nan, 4000, 4000, nan, 4.73, nan ],
[ nan, 5000, 5000, nan, 8.37, nan ],
[ nan, 6000, 6000, nan, 13.39, nan ],
[ nan, 7000, 7000, nan, 20.14, nan ],
[ nan, 8000, 8000, nan, 28.75, nan ],
[ nan, 9000, 9000, nan, 39.93, nan ],
[ nan, 10000, 10000, nan, 53.33, nan ],
[ nan, 12000, 12000, nan, 88.78, nan ],
[ nan, 14000, 14000, nan, 136.82, nan ],
[ nan, 16000, 16000, nan, 201.80, nan ],
[ nan, 18000, 18000, nan, 285.32, nan ],
[ nan, 20000, 20000, nan, 385.63, nan ],
[ nan, 300, 100, nan, 0.00, nan ],
[ nan, 600, 200, nan, 0.02, nan ],
[ nan, 900, 300, nan, 0.03, nan ],
[ nan, 1200, 400, nan, 0.06, nan ],
[ nan, 1500, 500, nan, 0.08, nan ],
[ nan, 1800, 600, nan, 0.12, nan ],
[ nan, 2100, 700, nan, 0.15, nan ],
[ nan, 2400, 800, nan, 0.20, nan ],
[ nan, 2700, 900, nan, 0.25, nan ],
[ nan, 3000, 1000, nan, 0.31, nan ],
[ nan, 6000, 2000, nan, 1.39, nan ],
[ nan, 9000, 3000, nan, 3.34, nan ],
[ nan, 12000, 4000, nan, 6.83, nan ],
[ nan, 15000, 5000, nan, 12.18, nan ],
[ nan, 18000, 6000, nan, 19.68, nan ],
[ nan, 21000, 7000, nan, 29.76, nan ],
[ nan, 24000, 8000, nan, 43.26, nan ],
[ nan, 27000, 9000, nan, 59.81, nan ],
[ nan, 100, 300, nan, 0.00, nan ],
[ nan, 200, 600, nan, 0.02, nan ],
[ nan, 300, 900, nan, 0.04, nan ],
[ nan, 400, 1200, nan, 0.06, nan ],
[ nan, 500, 1500, nan, 0.10, nan ],
[ nan, 600, 1800, nan, 0.13, nan ],
[ nan, 700, 2100, nan, 0.17, nan ],
[ nan, 800, 2400, nan, 0.21, nan ],
[ nan, 900, 2700, nan, 0.27, nan ],
[ nan, 1000, 3000, nan, 0.32, nan ],
[ nan, 2000, 6000, nan, 1.35, nan ],
[ nan, 3000, 9000, nan, 3.57, nan ],
[ nan, 4000, 12000, nan, 7.31, nan ],
[ nan, 5000, 15000, nan, 12.97, nan ],
[ nan, 6000, 18000, nan, 21.20, nan ],
[ nan, 7000, 21000, nan, 31.85, nan ],
[ nan, 8000, 24000, nan, 45.32, nan ],
[ nan, 9000, 27000, nan, 62.50, nan ],
[ nan, 10000, 100, nan, 0.01, nan ],
[ nan, 20000, 200, nan, 0.06, nan ],
[ nan, 30000, 300, nan, 0.15, nan ],
[ nan, 40000, 400, nan, 0.38, nan ],
[ nan, 50000, 500, nan, 0.61, nan ],
[ nan, 60000, 600, nan, 0.93, nan ],
[ nan, 70000, 700, nan, 1.30, nan ],
[ nan, 80000, 800, nan, 1.77, nan ],
[ nan, 90000, 900, nan, 2.40, nan ],
[ nan, 100000, 1000, nan, 3.25, nan ],
[ nan, 200000, 2000, nan, 18.60, nan ],
[ nan, 100, 10000, nan, 0.01, nan ],
[ nan, 200, 20000, nan, 0.05, nan ],
[ nan, 300, 30000, nan, 0.16, nan ],
[ nan, 400, 40000, nan, 0.33, nan ],
[ nan, 500, 50000, nan, 0.59, nan ],
[ nan, 600, 60000, nan, 0.91, nan ],
[ nan, 700, 70000, nan, 1.44, nan ],
[ nan, 800, 80000, nan, 1.92, nan ],
[ nan, 900, 90000, nan, 2.31, nan ],
[ nan, 1000, 100000, nan, 2.98, nan ],
[ nan, 2000, 200000, nan, 20.24, nan ],
])
# numactl --interleave=all ./testing_dgesdd -US -VS -N 100 -N 1000 --range 10:90:10 --range 100:900:100 --range 1000:9000:1000 --range 10000:20000:2000 -N 300,100 -N 600,200 -N 900,300 -N 1200,400 -N 1500,500 -N 1800,600 -N 2100,700 -N 2400,800 -N 2700,900 -N 3000,1000 -N 6000,2000 -N 9000,3000 -N 12000,4000 -N 15000,5000 -N 18000,6000 -N 21000,7000 -N 24000,8000 -N 27000,9000 -N 100,300 -N 200,600 -N 300,900 -N 400,1200 -N 500,1500 -N 600,1800 -N 700,2100 -N 800,2400 -N 900,2700 -N 1000,3000 -N 2000,6000 -N 3000,9000 -N 4000,12000 -N 5000,15000 -N 6000,18000 -N 7000,21000 -N 8000,24000 -N 9000,27000 -N 10000,100 -N 20000,200 -N 30000,300 -N 40000,400 -N 50000,500 -N 60000,600 -N 70000,700 -N 80000,800 -N 90000,900 -N 100000,1000 -N 200000,2000 -N 100,10000 -N 200,20000 -N 300,30000 -N 400,40000 -N 500,50000 -N 600,60000 -N 700,70000 -N 800,80000 -N 900,90000 -N 1000,100000 -N 2000,200000
dgesdd_US = array([
[ nan, 10, 10, nan, 0.00, nan ],
[ nan, 20, 20, nan, 0.00, nan ],
[ nan, 30, 30, nan, 0.00, nan ],
[ nan, 40, | |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Methods for cubic equations of state.
Currently only supports liquid and vapor phases
"""
import os
from enum import Enum
from pyomo.environ import (exp,
Expression,
ExternalFunction,
log,
Param,
Reals,
sqrt,
Var)
from pyomo.common.config import ConfigBlock, ConfigValue, In
from idaes.core.util.exceptions import PropertyNotSupportedError
from idaes.generic_models.properties.core.generic.utility import (
get_method, get_component_object as cobj)
from idaes.core.util.math import safe_log
from .eos_base import EoSBase
from idaes import bin_directory
import idaes.logger as idaeslog
from idaes.core.util.exceptions import BurntToast, ConfigurationError
# Set up logger
_log = idaeslog.getLogger(__name__)
# Set path to root finder .so file
_so = os.path.join(bin_directory, "cubic_roots.so")
def cubic_roots_available():
"""Make sure the compiled cubic root functions are available. Yes, in
    Windows the .so extension is still used.
"""
return os.path.isfile(_so)
class CubicType(Enum):
PR = 0
SRK = 1
class MixingRuleA(Enum):
default = 0
class MixingRuleB(Enum):
default = 0
EoS_param = {
CubicType.PR: {'u': 2, 'w': -1, 'omegaA': 0.45724, 'coeff_b': 0.07780},
CubicType.SRK: {'u': 1, 'w': 0, 'omegaA': 0.42748, 'coeff_b': 0.08664}
}
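# NOTE: u, w, omegaA and coeff_b above are the coefficients of the generalized
# cubic form P = R*T/(V-b) - a/(V**2 + u*b*V + w*b**2); Peng-Robinson uses
# (u=2, w=-1) and Soave-Redlich-Kwong uses (u=1, w=0), while omegaA and coeff_b
# are the critical-point constants used to build the pure-component a and b.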
CubicConfig = ConfigBlock()
CubicConfig.declare("type", ConfigValue(
domain=In(CubicType),
description="Equation of state to use",
doc="Enum indicating type of cubic equation of state to use."))
class Cubic(EoSBase):
@staticmethod
def common(b, pobj):
ctype = pobj._cubic_type
cname = pobj.config.equation_of_state_options["type"].name
if hasattr(b, cname+"_fw"):
# Common components already constructed by previous phase
return
# Create expressions for coefficients
def func_fw(m, j):
cobj = m.params.get_component(j)
if ctype == CubicType.PR:
return 0.37464 + 1.54226*cobj.omega - \
0.26992*cobj.omega**2
elif ctype == CubicType.SRK:
return 0.48 + 1.574*cobj.omega - \
0.176*cobj.omega**2
else:
raise BurntToast(
"{} received unrecognized cubic type. This should "
"never happen, so please contact the IDAES developers "
"with this bug.".format(b.name))
b.add_component(cname+'_fw',
Expression(b.component_list,
rule=func_fw,
doc='EoS S factor'))
def func_a(m, j):
cobj = m.params.get_component(j)
fw = getattr(m, cname+"_fw")
return (EoS_param[ctype]['omegaA']*(
(Cubic.gas_constant(b) *
cobj.temperature_crit)**2/cobj.pressure_crit) *
((1+fw[j]*(1-sqrt(m.temperature /
cobj.temperature_crit)))**2))
b.add_component(cname+'_a',
Expression(b.component_list,
rule=func_a,
doc='Component a coefficient'))
def func_b(m, j):
cobj = m.params.get_component(j)
return (EoS_param[ctype]['coeff_b'] * Cubic.gas_constant(b) *
cobj.temperature_crit/cobj.pressure_crit)
b.add_component(cname+'_b',
Expression(b.component_list,
rule=func_b,
doc='Component b coefficient'))
def rule_am(m, p):
try:
rule = m.params.get_phase(p).config.equation_of_state_options[
"mixing_rule_a"]
except KeyError:
rule = MixingRuleA.default
a = getattr(m, cname+"_a")
if rule == MixingRuleA.default:
return rule_am_default(m, cname, a, p)
else:
raise ConfigurationError(
"{} Unrecognized option for Equation of State "
"mixing_rule_a: {}. Must be an instance of MixingRuleA "
"Enum.".format(m.name, rule))
b.add_component(cname+'_am',
Expression(b.phase_list, rule=rule_am))
def rule_bm(m, p):
try:
rule = m.params.get_phase(p).config.equation_of_state_options[
"mixing_rule_b"]
except KeyError:
rule = MixingRuleB.default
b = getattr(m, cname+"_b")
if rule == MixingRuleB.default:
return rule_bm_default(m, b, p)
else:
raise ConfigurationError(
"{} Unrecognized option for Equation of State "
"mixing_rule_a: {}. Must be an instance of MixingRuleB "
"Enum.".format(m.name, rule))
b.add_component(cname+'_bm',
Expression(b.phase_list, rule=rule_bm))
def rule_A(m, p):
am = getattr(m, cname+"_am")
return (am[p]*m.pressure /
(Cubic.gas_constant(b)*m.temperature)**2)
b.add_component(cname+'_A',
Expression(b.phase_list, rule=rule_A))
def rule_B(m, p):
bm = getattr(m, cname+"_bm")
return (bm[p]*m.pressure /
(Cubic.gas_constant(b)*m.temperature))
b.add_component(cname+'_B',
Expression(b.phase_list, rule=rule_B))
def rule_delta(m, p, i):
# See pg. 145 in Properties of Gases and Liquids
a = getattr(m, cname+"_a")
am = getattr(m, cname+"_am")
kappa = getattr(m.params, cname+"_kappa")
return (2*sqrt(a[i])/am[p] *
sum(m.mole_frac_phase_comp[p, j]*sqrt(a[j]) *
(1-kappa[i, j])
for j in b.components_in_phase(p)))
b.add_component(cname+"_delta",
Expression(b.phase_component_set,
rule=rule_delta))
def rule_dadT(m, p):
# See pg. 102 in Properties of Gases and Liquids
a = getattr(m, cname+"_a")
fw = getattr(m, cname+"_fw")
kappa = getattr(m.params, cname+"_kappa")
return -((Cubic.gas_constant(b)/2)*sqrt(EoS_param[ctype]['omegaA']) *
sum(sum(m.mole_frac_phase_comp[p, i] *
m.mole_frac_phase_comp[p, j] *
(1-kappa[i, j]) *
(fw[j]*sqrt(a[i] *
m.params.get_component(j).temperature_crit /
m.params.get_component(j).pressure_crit) +
fw[i]*sqrt(a[j] *
m.params.get_component(i).temperature_crit /
m.params.get_component(i).pressure_crit))
for j in m.components_in_phase(p))
for i in m.components_in_phase(p)) /
sqrt(m.temperature))
b.add_component(cname+"_dadT",
Expression(b.phase_list,
rule=rule_dadT))
# Add components at equilibrium state if required
if (b.params.config.phases_in_equilibrium is not None and
(not b.config.defined_state or b.always_flash)):
def func_a_eq(m, p1, p2, j):
cobj = m.params.get_component(j)
fw = getattr(m, cname+"_fw")
return (EoS_param[ctype]['omegaA']*(
(Cubic.gas_constant(b) *
cobj.temperature_crit)**2/cobj.pressure_crit) *
((1+fw[j]*(1-sqrt(m._teq[p1, p2] /
cobj.temperature_crit)))**2))
b.add_component('_'+cname+'_a_eq',
Expression(b.params._pe_pairs,
b.component_list,
rule=func_a_eq,
doc='Component a coefficient at Teq'))
def rule_am_eq(m, p1, p2, p3):
try:
rule = m.params.get_phase(p3).config.equation_of_state_options[
"mixing_rule_a"]
except KeyError:
rule = MixingRuleA.default
a = getattr(m, "_"+cname+"_a_eq")
if rule == MixingRuleA.default:
return rule_am_default(m, cname, a, p3, (p1, p2))
else:
raise ConfigurationError(
"{} Unrecognized option for Equation of State "
"mixing_rule_a: {}. Must be an instance of MixingRuleA "
"Enum.".format(m.name, rule))
b.add_component('_'+cname+'_am_eq',
Expression(b.params._pe_pairs,
b.phase_list,
rule=rule_am_eq))
def rule_A_eq(m, p1, p2, p3):
am_eq = getattr(m, "_"+cname+"_am_eq")
return (am_eq[p1, p2, p3]*m.pressure /
(Cubic.gas_constant(b)*m._teq[p1, p2])**2)
b.add_component('_'+cname+'_A_eq',
Expression(b.params._pe_pairs,
b.phase_list,
rule=rule_A_eq))
def rule_B_eq(m, p1, p2, p3):
bm = getattr(m, cname+"_bm")
return (bm[p3]*m.pressure /
(Cubic.gas_constant(b)*m._teq[p1, p2]))
b.add_component('_'+cname+'_B_eq',
Expression(b.params._pe_pairs,
b.phase_list,
rule=rule_B_eq))
def rule_delta_eq(m, p1, p2, p3, i):
# See pg. 145 in Properties of Gases and Liquids
a = getattr(m, "_"+cname+"_a_eq")
am = getattr(m, "_"+cname+"_am_eq")
kappa = getattr(m.params, cname+"_kappa")
return (2*sqrt(a[p1, p2, i])/am[p1, p2, p3] *
sum(m.mole_frac_phase_comp[p3, j]*sqrt(a[p1, p2, j]) *
(1-kappa[i, j])
for j in m.components_in_phase(p3)))
b.add_component("_"+cname+"_delta_eq",
Expression(b.params._pe_pairs,
b.phase_component_set,
rule=rule_delta_eq))
# Set up external function calls
b.add_component("_"+cname+"_ext_func_param",
Param(default=ctype.value))
b.add_component("_"+cname+"_proc_Z_liq",
ExternalFunction(library=_so,
function="ceos_z_liq"))
b.add_component("_"+cname+"_proc_Z_vap",
ExternalFunction(library=_so,
function="ceos_z_vap"))
@staticmethod
def calculate_scaling_factors(b, pobj):
pass
@staticmethod
def build_parameters(b):
b._cubic_type = b.config.equation_of_state_options["type"]
cname = b._cubic_type.name
param_block = b.parent_block()
if hasattr(param_block, cname+"_kappa"):
# Common components already constructed by previous phase
return
kappa_data = param_block.config.parameter_data[cname+"_kappa"]
param_block.add_component(
cname+'_kappa',
Var(param_block.component_list,
param_block.component_list,
within=Reals,
initialize=kappa_data,
doc=cname+' binary interaction parameters',
units=None))
@staticmethod
def compress_fact_phase(b, p):
pobj = b.params.get_phase(p)
cname = pobj._cubic_type.name
A = getattr(b, cname+"_A")
B = getattr(b, cname+"_B")
f = getattr(b, "_"+cname+"_ext_func_param")
if pobj.is_vapor_phase():
proc = getattr(b, "_"+cname+"_proc_Z_vap")
elif pobj.is_liquid_phase():
proc = getattr(b, "_"+cname+"_proc_Z_liq")
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
return proc(f, A[p], B[p])
@staticmethod
def dens_mass_phase(b, p):
return b.dens_mol_phase[p]*b.mw_phase[p]
@staticmethod
def dens_mol_phase(b, p):
pobj = b.params.get_phase(p)
if pobj.is_vapor_phase() or pobj.is_liquid_phase():
return b.pressure/(
Cubic.gas_constant(b)*b.temperature*b.compress_fact_phase[p])
else:
raise PropertyNotSupportedError(_invalid_phase_msg(b.name, p))
# TODO: Need to add functions to calculate cp and cv
@staticmethod
def energy_internal_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(blk.name, p))
cname = pobj._cubic_type.name
am = getattr(blk, cname+"_am")[p]
bm = getattr(blk, cname+"_bm")[p]
B = getattr(blk, cname+"_B")[p]
dadT = getattr(blk, cname+"_dadT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]['u']
EoS_w = EoS_param[pobj._cubic_type]['w']
EoS_p = sqrt(EoS_u**2 - 4*EoS_w)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
# Departure function for U is similar to H minus the RT(Z-1) term
return (((blk.temperature*dadT - am) *
safe_log((2*Z + B*(EoS_u+EoS_p)) / (2*Z + B*(EoS_u-EoS_p)),
eps=1e-6)) / (bm*EoS_p) +
sum(blk.mole_frac_phase_comp[p, j] *
EoSBase.energy_internal_mol_ig_comp_pure(blk, j)
for j in blk.components_in_phase(p)))
@staticmethod
def energy_internal_mol_phase_comp(blk, p, j):
pobj = blk.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(blk.name, p))
cname = pobj._cubic_type.name
am = getattr(blk, cname+"_am")[p]
bm = getattr(blk, cname+"_bm")[p]
B = getattr(blk, cname+"_B")[p]
dadT = getattr(blk, cname+"_dadT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]['u']
EoS_w = EoS_param[pobj._cubic_type]['w']
EoS_p = sqrt(EoS_u**2 - 4*EoS_w)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
# Departure function for U is similar to H minus the RT(Z-1) term
return (((blk.temperature*dadT - am) *
safe_log((2*Z + B*(EoS_u+EoS_p)) / (2*Z + B*(EoS_u-EoS_p)),
eps=1e-6)) / (bm*EoS_p) +
EoSBase.energy_internal_mol_ig_comp_pure(blk, j))
@staticmethod
def enth_mol_phase(blk, p):
pobj = blk.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(blk.name, p))
cname = pobj._cubic_type.name
am = getattr(blk, cname+"_am")[p]
bm = getattr(blk, cname+"_bm")[p]
B = getattr(blk, cname+"_B")[p]
dadT = getattr(blk, cname+"_dadT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]['u']
EoS_w = EoS_param[pobj._cubic_type]['w']
EoS_p = sqrt(EoS_u**2 - 4*EoS_w)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
return (((blk.temperature*dadT - am) *
safe_log((2*Z + B*(EoS_u+EoS_p)) / (2*Z + B*(EoS_u-EoS_p)),
eps=1e-6) +
Cubic.gas_constant(blk)*blk.temperature*(Z-1)*bm*EoS_p) /
(bm*EoS_p) + sum(blk.mole_frac_phase_comp[p, j] *
get_method(blk, "enth_mol_ig_comp", j)(
blk, cobj(blk, j), blk.temperature)
for j in blk.components_in_phase(p)))
@staticmethod
def enth_mol_phase_comp(blk, p, j):
pobj = blk.params.get_phase(p)
if not (pobj.is_vapor_phase() or pobj.is_liquid_phase()):
raise PropertyNotSupportedError(_invalid_phase_msg(blk.name, p))
cname = pobj._cubic_type.name
am = getattr(blk, cname+"_am")[p]
bm = getattr(blk, cname+"_bm")[p]
B = getattr(blk, cname+"_B")[p]
dadT = getattr(blk, cname+"_dadT")[p]
Z = blk.compress_fact_phase[p]
EoS_u = EoS_param[pobj._cubic_type]['u']
EoS_w = EoS_param[pobj._cubic_type]['w']
EoS_p = sqrt(EoS_u**2 - 4*EoS_w)
# Derived from equation on pg. 120 in Properties of Gases and Liquids
return (((blk.temperature*dadT - am) *
safe_log((2*Z + B*(EoS_u+EoS_p)) / (2*Z + B*(EoS_u-EoS_p)),
eps=1e-6) +
Cubic.gas_constant(blk)*blk.temperature*(Z-1)*bm*EoS_p) | |
Axes will NOT auto-rescale after this is called.
"""
# 2010-12-02 19:58 IJC: Created from drawRectangle
# 2014-03-01 13:51 IJMC: Added 'dodraw' option.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if kw.has_key('ax'):
ax = kw.pop('ax')
else:
ax = plt.gca()
p = mpatches.Polygon(xy, **kw)
ax.add_patch(p)
if kw.has_key('dodraw') and kw['dodraw']: plt.draw()
return ax, p
def drawCircle(x,y,radius,**kw):
"""Draw a circular patch on the current, or specified, axes.
:INPUT:
x, y -- center of circle
radius -- radius of circle
:OPTIONAL INPUT:
ax -- Axis to draw upon. if None, defaults to current axes.
dodraw -- if True, call 'draw()' function to immediately re-draw axes.
**kw -- options passable to :func:`matplotlib.patches.Circle`
:NOTE: Axes will NOT auto-rescale after this is called.
"""
# 2011-01-28 16:03 IJC: Created
# 2014-03-01 13:51 IJMC: Added 'dodraw' option.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if kw.has_key('ax'):
ax = kw.pop('ax')
else:
ax = plt.gca()
p = mpatches.Circle((x,y), radius, **kw)
ax.add_patch(p)
if kw.has_key('dodraw') and kw['dodraw']: plt.draw()
return ax, p
def drawEllipse(x,y,width, height,**kw):
"""Draw an elliptical patch on the current, or specified, axes.
:INPUT:
x, y -- center of ellipse
width -- width of ellipse
      height -- height of ellipse
:OPTIONAL INPUT:
ax -- Axis to draw upon. if None, defaults to current axes.
dodraw -- if True, call 'draw()' function to immediately re-draw axes.
**kw -- options passable to :func:`matplotlib.patches.Ellipse`
(angle, linewidth, fill, ...)
:NOTE: Axes will NOT auto-rescale after this is called.
:SEE_ALSO:
:func:`drawCircle`, :func:`drawRectangle`
"""
# 2011-10-20 11:32 IJMC: Created
# 2014-03-01 13:51 IJMC: Added 'dodraw' option.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if kw.has_key('ax'):
ax = kw.pop('ax')
else:
ax = plt.gca()
p = mpatches.Ellipse((x,y), width, height, **kw)
ax.add_patch(p)
if kw.has_key('dodraw') and kw['dodraw']: plt.draw()
return ax, p
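# Illustrative use of drawEllipse (values are arbitrary, chosen only for this sketch):
#   ax, patch = drawEllipse(0.5, 0.5, 0.3, 0.1, angle=30, fc='0.7', dodraw=True)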
def errxy(x,y,xbins, xmode='mean', ymode='mean', xerr='minmax', yerr='sdom', clean=None, binfactor=None, verbose=False,returnstats=False, timing=False, doindex=False):
"""Bin down datasets in X and Y for errorbar plotting
:INPUTS:
x -- (array) independent variable data
y -- (array) dependent variable data
xbins -- (array) edges of bins, in x-space. Only x-data
between two bin edges will be used. Thus if M bin
edges are entered, (M-1) datapoints will be returned.
If xbins==None, then no binning is done.
:OPTIONAL INPUT:
xmode/ymode -- (str) method to aggregate x/y data into datapoints:
'mean' -- use numpy.mean
'median' -- use numpy.median
'sum' -- use numpy.sum
None -- don't compute; return the empty list []
xerr/yerr -- (str) method to aggregate x/y data into errorbars
'std' -- sample standard deviation (numpy.std)
'sdom' -- standard deviation on the mean; i.e., std/sqrt(N)
'minmax' -- use full range of data in the bin
None -- don't compute; return the empty list []
binfactor -- (int) If not None, average over this many
consecutive values instead of binning explicitly by
time-based bins. Can also be a sequence, telling the
number of values over which to average. E.g.,
binfactor=[10,10,20] will bin over the first 10 points,
the second 10 points, and the next 20 points.
clean -- (dict) keyword options to clean y-data ONLY, via
analysis.removeoutliers, with an additional "nsigma"
keyword. See removeoutliers for more information.
E.g.: clean=dict(nsigma=5,remove='both',niter=1)
:OUTPUTS: a tuple of four arrays to be passed to matplotlib.pyplot.errorbar:
xx -- locations of the aggregated x-datapoint in each bin
yy -- locations of the aggregated y-datapoint in each bin
xerr -- x-errorbars
yerr -- y-errorbars
:EXAMPLE:
::
x = hstack((arange(10), arange(20)+40))
y = randn(len(x))
xbins = [-1,15,70]
xx,yy,xerr,yerr = errxy(x,y,xbins)
plot(x,y, '.b')
errorbar(xx,yy,xerr=xerr,yerr=yerr, fmt='or')
:NOTES:
To just bin down uncleaned data (i.e., no 'error' terms
returned), set clean, xerr, yerr to None. However, when
computing all values (xerr and yerr not None) it is faster
      to set clean to some ridiculous value, i.e.,
clean=dict(niter=0, nsigma=9e99). This probably means more
optimization could be done.
Be sure you call the errorbar function using the keywords xerr
and yerr, since otherwise the default order of inputs to the
function is (x,y,yerr,xerr).
Data 'x' are determined to be in a bin with sides (L, R) when
satisfying the condition (x>L) and (x<=R)
:SEE ALSO: matplotlib.pyplot.errorbar, :func:`analysis.removeoutliers`
:REQUIREMENTS: :doc:`numpy`, :doc:`analysis`
"""
# 2009-09-29 20:07 IJC: Created w/mean-median and std-sdom-minmax.
# 2009-12-14 16:01 IJC: xbins can be 'None' for no binning.
# 2009-12-15 10:09 IJC: Added "binfactor" option.
# 2009-12-22 09:56 IJC: "binfactor" can now be a sequence.
# 2009-12-29 01:16 IJC: Fixed a bug with binfactor sequences.
# 2010-04-29 09:59 IJC: Added 'returnstats' feature
# 2010-10-19 16:25 IJC: Added 'sum' option for x-data
# 2011-03-22 12:57 IJC: Added 'none' option for data and errors
# 2012-03-20 16:33 IJMC: Fixed bug; xmode=='none' now works.
# 2012-03-27 14:00 IJMC: Now using np.digitize -- speed boost.
# Rewrote code to optimize (somewhat),
# cleaned up 'import' statements.
# 2012-04-08 15:57 IJMC: New speed boost from adopting
# numpy.histogram-like implementation:
# numpy.searchsorted, etc.
# 2017-02-15 09:58 IJMC: Now use "doindex" for index-based binning.
import numpy as np
from analysis import removeoutliers
if timing:
import time
tic = time.time()
def sdom(data):
"""Return standard deviation of the mean."""
return np.std(data)/np.sqrt(data.size)
def getcenter(data, cmode):
"""Get data center based on mode. Helper function."""
if cmode is None:
ret = 0
elif cmode=='mean':
ret = np.mean(data)
elif cmode=='median':
ret = np.median(data)
elif cmode=='sum':
ret = np.sum(data)
return ret
def geterr(data, emode, cmode):
"""Get errorbar. Helper function."""
if emode is None:
ret = []
elif emode=='std':
ret = np.std(data)
elif emode=='sdom':
ret = sdom(data)
elif emode=='minmax':
if len(data)==0:
ret = [np.nan, np.nan]
else:
center = getcenter(data,cmode)
ret = [center-min(data), max(data)-center]
return ret
def cleandata(data, clean, returnstats=False):
"""Clean data using removeoutliers. Helper function."""
init_count = np.array(data).size
if clean==None: # Don't clean at all!
#clean = dict(nsigma=1000, niter=0)
if returnstats:
ret = data, (init_count, init_count)
else:
ret = data
else: # Clean the data somehow ('clean' must be a dict)
if not clean.has_key('nsigma'):
clean.update(dict(nsigma=99999))
data = removeoutliers(data, **clean)
if returnstats:
ret = data, (init_count, np.array(data).size)
else:
ret = data
return ret
if timing:
print "%1.3f sec since starting function; helpers defined" % (time.time() - tic)
####### Begin main function ##########
sorted_index = np.argsort(x)
x = np.array(x, copy=False)[sorted_index]
y = np.array(y, copy=False)[sorted_index]
#x = np.array(x,copy=True).ravel()
#y = np.array(y,copy=True).ravel()
xbins = np.array(xbins,copy=True).ravel()
if xbins[0]==None and binfactor==None:
if returnstats ==False:
ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan
else:
ret = x, y, np.ones(x.shape)*np.nan, np.ones(y.shape)*np.nan, (x.size, x.size)
return ret
if binfactor==None: # used passed-in 'xbins'
xbins = np.sort(xbins)
elif hasattr(binfactor,'__iter__'): # use variable-sized bins
binfactor = np.array(binfactor).copy()
sortedx = np.sort(x)
betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
xbins = []
counter = 0
for ii in range(len(binfactor)):
thisbin = betweens[counter]
xbins.append(thisbin)
counter += binfactor[ii]
xbins.append(x.max() + 1)
else: # bin down by the same factor throughout
binfactor = int(binfactor)
sortedx = np.sort(x)
betweens = np.hstack((x.min()-1, 0.5*(sortedx[1::]+sortedx[0:len(x)-1]), x.max()+1))
xbins = betweens[::binfactor]
if timing:
print "%1.3f sec since starting function; bins defined" % (time.time() - tic)
nbins = len(xbins)-1
if doindex: nbins += 1
arraynan = np.array([np.nan])
exx = []
eyy = []
xx = np.zeros(nbins)
yy = np.zeros(nbins)
yy2 = np.zeros(nbins)
init_count, final_count = y.size, 0
if timing:
setuptime = 0
xdatatime = 0
ydatatime = 0
statstime = 0
#import pylab as py
#xxx = np.sort(x)
if timing: tic1 = time.time()
#inds = np.digitize(x, xbins)
if doindex:
inds2 = [(x==xbins[ii]) for ii in range(nbins)]
else:
inds2 = [[x.searchsorted(xbins[ii], side='left'), \
x.searchsorted(xbins[ii+1], side='left')] for ii in range(nbins)]
if timing: setuptime += (time.time() - tic1)
#pdb.set_trace()
#bin_means = [data[digitized == i].mean() for i in range(1, len(bins))]
def dojob(function, vector, inds):
if len(inds)==2:
ret = function(vector[inds[0]:inds[1]])
else:
ret = function(vector[inds])
return ret
def doerrjob(vector, err, mode, inds):
if len(inds)==2:
ret = geterr(vector[inds[0]:inds[1]], err, mode)
else:
ret = geterr(vector[inds], err, mode)
return ret
dox = xmode is not None
doy = ymode is not None
doex = xerr is not None
doey = yerr is not None
if clean is | |
import os
import re
import sys
import shutil
import struct
import capstone
import tempfile
import contextlib
import subprocess
import fnmatch
import string
from .errors import ASMConverterError, ASMConverterNotImplementedError
class NasmException(Exception):
pass
class CLangException(Exception):
pass
class ObjcopyException(Exception):
pass
class UndefinedSymbolException(Exception):
pass
ELF_HEADER = "7f45 4c46 0101 0100 0000 0000 0000".replace(" ", "").decode('hex')
CGC_HEADER = "7f43 4743 0101 0143 014d 6572 696e".replace(" ", "").decode('hex')
class ASMConverter(object):
size_suffix = {
1: 'b',
2: 'w',
4: 'l',
}
@staticmethod
def get_size(op):
"""
Get the size from the operand
:param str op: The operand
:return: Size in bytes
:rtype: int
"""
# memory operand
op = op.lower()
if op.strip().startswith("{"):
return 4
if "dword" in op:
return 4
elif "word" in op:
return 2
elif "byte" in op:
return 1
# register
if len(op) == 3 and op.startswith('e') and op[-1] in ('x', 'i', 'p'):
return 4
elif len(op) == 2 and any([ c in string.lowercase for c in op ]):
if not op.endswith('h') and not op.endswith('l'):
return 2
else:
return 1
return None
@staticmethod
def reg_to_att(reg):
"""
Convert a register string from intel syntax to AT&T syntax
:param str reg: The register name
:return: converted string
:rtype: str
"""
reg = reg.lower()
is_reg = False
if len(reg) == 4 and reg.startswith('xmm'):
is_reg = True
elif len(reg) == 3 and reg.startswith('e') and reg[-1] in ('x', 'i', 'p'):
is_reg = True
elif len(reg) == 2:
if reg.endswith('h') or reg.endswith('l') or reg[-1] in ('x', 'i', 'p'):
is_reg = True
if not is_reg:
return None
return "%%%s" % reg
@staticmethod
def mem_to_att_base_disp(base_reg, disp, sign):
if sign == '-':
disp = '-' + disp
return "%s(%s)" % (disp, base_reg)
@staticmethod
def mem_to_att_base_index(base_reg, index_reg, sign):
if sign == '-':
# scale is -1
return "(%s, %s, -1)" % (base_reg, index_reg)
else:
# scale is 1
return "(%s, %s)" % (base_reg, index_reg)
@staticmethod
def mem_to_att_base_index_scale(base_reg, index_reg, scale, sign):
if sign == '-':
return "(%s, %s, -%s)" % (base_reg, index_reg, scale)
else:
return "(%s, %s, %s)" % (base_reg, index_reg, scale)
@staticmethod
def mem_to_att_index_scale_disp(index_reg, scale, disp, sign):
if sign == '-':
return "%s( , %s, -%s)" % (disp, index_reg, scale)
else:
return "%s( , %s, %s)" % (disp, index_reg, scale)
@staticmethod
def mem_to_att(mem):
"""
Convert a memory operand string from intel syntax to AT&T syntax
:param str mem: The memory operand string
:return: converted string
:rtype: str
"""
m = re.match(r"[^\[]*\[([^\]]+)\]", mem)
if m:
mem_ptr = m.group(1)
# [{this_is_a_label}]
m = re.match(r"^\s*\{([\S]+)\}\s*$", mem_ptr)
if m:
label = m.group(1)
return label
# base + index * scale + displacement
scale_regex = "(0x1|0x2|0x4|0x8|1|2|4|8)"
m = re.match(r"\s*([^\s\+\-]+)\s*([\+])\s*([^\s\+\-]+)\s*\*"+ scale_regex + \
r"\s*([\+\-])\s*([^\s\+\-]+)\s*$", mem_ptr)
if m:
part_0, sign_1, part_1, scale, sign_2, part_2 = m.groups()
if all(c in string.digits for c in part_1):
# part_1 is displacement
part_2, part_1 = part_1, part_2
base_reg = ASMConverter.reg_to_att(part_0)
if base_reg is None: raise ASMConverterError('Unsupported base register "%s"' % part_0)
index_reg = ASMConverter.reg_to_att(part_1)
if index_reg is None: raise ASMConverterError('Unsupported index register "%s"' % part_1)
disp = part_2
if sign_2 == '-':
disp = '-' + disp
# negative scale should be invalid:
# "error: scale factor in address must be 1, 2, 4 or 8\nmovl -0x10(%esi, %edi, -1)"
scale = str((int(scale,base=0)))
tstr = "%s(%s, %s, %s)" % (disp, base_reg, index_reg, scale)
return tstr
# base + index + displacement
m = re.match(r"\s*([^\s\+\-]+)\s*([\+\-])\s*([^\s\+\-]+)\s*([\+\-])\s*([^\s\+\-]+)\s*$", mem_ptr)
if m:
part_0, sign_1, part_1, sign_2, part_2 = m.groups()
if all(c in string.digits for c in part_1):
# part_1 is displacement
part_2, part_1 = part_1, part_2
if not all(c in string.digits+"xX" for c in part_2):
raise ASMConverterError('Unsupported displacement string "%s"' % part_2)
base_reg = ASMConverter.reg_to_att(part_0)
if base_reg is None: raise ASMConverterError('Unsupported base register "%s"' % part_0)
index_reg = ASMConverter.reg_to_att(part_1)
if index_reg is None: raise ASMConverterError('Unsupported index register "%s"' % part_1)
disp = str((int(part_2,base=0)))
if sign_2 == '-':
disp = '-' + disp
if sign_1 == '-':
return "%s(%s, %s, -1)" % (disp, base_reg, index_reg)
else:
return "%s(%s, %s)" % (disp, base_reg, index_reg)
# base + displacement, or base + index * scale, or index * scale + displacement
m = re.match(r"\s*([^\s\+\-]+)\s*([\+\-])\s*([^\s\+\-]+)\s*$", mem_ptr)
if m:
part_0, sign, part_1 = m.group(1), m.group(2), m.group(3)
# see if this is index * scale
m0 = re.match(r"^\s*([^\s\*]+)\s*\*\s*(\d+)\s*$", part_0)
if m0:
# ouch it's index * scale
index, scale = m0.group(1), m0.group(2)
index_reg = ASMConverter.reg_to_att(index)
if part_1[0] == '{' and part_1[-1] == '}':
# disp might be a label. treat it as a displacement
disp = part_1[1:-1]
else:
# if part is a register, it's a "base + index"
part_1_reg = ASMConverter.reg_to_att(part_1)
if part_1_reg is not None:
# oh it is a register!
base_reg = part_1_reg
return ASMConverter.mem_to_att_base_index_scale(base_reg, index_reg, scale, sign)
# otherwise it's a displacement
disp = part_1
return ASMConverter.mem_to_att_index_scale_disp(index_reg, scale, disp, sign)
else:
# it's base
base = part_0
base_reg = ASMConverter.reg_to_att(base)
if base_reg is None:
# some idiot wrote it in this way: displacement + base
# e.g. {this_is_a_label} + edi
# fuck anyone who wrote assembly like that...
base, part_1 = part_1, base
base_reg = ASMConverter.reg_to_att(base)
if base_reg is None:
raise ASMConverterError('Unsupported input: %s' % mem_ptr)
# let's decide if the part is an index or a displacement
if part_1[0] == '{' and part_1[-1] == '}':
# disp might be a label. treat it as a displacement
part_1 = part_1[1:-1]
else:
# if part is a register, it's a "base + index"
disp_reg = ASMConverter.reg_to_att(part_1)
if disp_reg is not None:
# oh it is a register!
return ASMConverter.mem_to_att_base_index(base_reg, disp_reg, sign)
m1 = re.match(r"^\s*([^\s\*]+)\s*\*\s*(\d+)\s*$", part_1)
if m1:
# it's a base + index * scale
index, scale = m1.group(1), m1.group(2)
index_reg = ASMConverter.reg_to_att(index)
return ASMConverter.mem_to_att_base_index_scale(base_reg, index_reg, scale, sign)
else:
# it's a "base + displacement"
disp = part_1
return ASMConverter.mem_to_att_base_disp(base_reg, disp, sign)
# base or displacement
m = re.match(r"\s*([^\s\+\-]+)\s*$", mem_ptr)
if m:
something = m.group(1)
reg = ASMConverter.reg_to_att(something)
if reg:
# base
return "(%s)" % reg
else:
# displacement
# TODO: fix it
if something[0] == '{' and something[-1] == '}':
return something[1:-1]
return "%s" % something
if mem[0] == '{' and mem[-1] == '}':
return "$%s" % mem[1:-1]
# raise NotImplementedError('operand "%s" is not supported by ASMConverter. Please bug Fish to fix it.' % mem)
return None
@staticmethod
def imm_to_att(op):
"""
Convert an immediate to AT&T style syntax
:param str op: The operand
:return: converted string
:rtype: str
"""
m = re.match(r"\s*([0-9a-fA-Fxh]+)$", op)
if m:
imm = m.group(1)
return "$%s" % imm
@staticmethod
def to_att(op, mnemonic=None):
"""
Convert an operand from intel syntax to AT&T syntax
:param str op: the operand string
:param str mnemonic: the mnemonic
:return: converted string
:rtype: str
"""
if op[0] == '{' and op[-1] == '}':
# it's a label
label = op[1:-1]
if mnemonic[0] == 'j' or mnemonic in ('call', ):
return 'label', '%s' % label
else:
return 'label', '$' + label
new_op = ASMConverter.reg_to_att(op)
if new_op is not None:
if mnemonic[0] == 'j' or mnemonic in ('call', ):
return 'reg', '*%s' % new_op
else:
return 'reg', new_op
new_op = ASMConverter.mem_to_att(op)
if new_op is not None:
if mnemonic[0] != 'j' and mnemonic not in ('call', ):
return 'mem', new_op
else:
return 'mem', '*%s' % new_op
new_op = ASMConverter.imm_to_att(op)
if new_op is not None:
if mnemonic[0] != 'j':
return 'imm', new_op
else:
return 'imm', op
# other type of label
return 'label', op
@staticmethod
def mnemonic_to_att(m, size, op_sort=None):
if m in ('int', 'pushfd', 'popfd', 'nop', 'call',
# floating point instructions
'addss',
):
return m
if m.startswith('j'):
# jumps
return m
if m.startswith('f'):
# floating point instructions
return m
if op_sort not in ('reg', 'mem') and m.startswith('j'):
return m
# special case for some mnemonics
if m == 'movsx':
size_suffix = ASMConverter.size_suffix[size]
m = 'movs' + size_suffix + 'l'
return m
elif m == 'movzx':
size_suffix = ASMConverter.size_suffix[size]
m = 'movz' + size_suffix + 'l'
return m
m += ASMConverter.size_suffix[size]
return m
@staticmethod
def intel_to_att(asm):
# convert each line from intel syntax to AT&T syntax
converted = []
for l in asm.split('\n'):
# comments
m = | |
def _train_layer_call(self, inputs, input_mask, memory, mem_mask):
att_res = self._train_self_att_block(inputs, input_mask)
enc_att = self._train_memory_att_block(att_res, memory, mem_mask)
res = self._train_ffd_block(enc_att)
return res
def _train_input_mask(self, input_mask):
# input_mask: [batch, length]
# need: [batch, head, length, length]
batch = tf.shape(input_mask)[0]
length = tf.shape(input_mask)[1]
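        # Combine a causal (lower-triangular) mask with the padding mask so that
        # position i may only attend to non-padded positions j <= i.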
lower_triangle = tf.matrix_band_part(tf.ones([length, length], dtype=tf.int32), -1, 0)
lower_triangle = tf.reshape(lower_triangle, [1, 1, length, length])
lower_triangle = tf.tile(lower_triangle, [batch, self._heads, 1, 1])
input_mask = tf.expand_dims(tf.expand_dims(input_mask, 1), 2)
input_mask = tf.tile(input_mask, [1, self._heads, length, 1])
return input_mask * lower_triangle
def assign_memory(self, memory, mask):
"""联合模型中,word进行decode时的batch与训练时不同(decode出的pattern长度不同)"""
with tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE):
# memory_hidden = memory.shape.as_list()[-1]
# if self._hidden != memory_hidden:
# memory = self._ffd_fn(dense(memory, self._hidden, use_bias=False, scope='input_proj_mem'))
self._step_memory = memory
memory_mask = tf.expand_dims(tf.expand_dims(mask, 1), 2) # [batch, 1, 1, length]
memory_mask = tf.tile(memory_mask, [1, self._heads, 1, 1])
self._step_mem_mask = memory_mask
self._batch = tf.shape(self._step_memory)[0]
def __call__(self, inputs, memory, input_mask, memory_mask, all_layer=False, reuse=False):
self._batch = tf.shape(inputs)[0]
with tf.variable_scope(self._scope, reuse=reuse):
hidden = inputs.shape.as_list()[-1]
if self._hidden != hidden:
inputs = self._ffd_fn(dense(inputs, self._hidden, use_bias=False, scope='input_proj'))
# if self._hidden != memory_hidden:
# memory = self._ffd_fn(dense(memory, self._hidden, use_bias=False, scope='input_proj_mem'))
input_mask = tf.to_int32(input_mask)
input_mask = self._train_input_mask(input_mask)
self.assign_memory(memory, memory_mask)
outputs = [inputs]
for layer in range(self._layer):
with tf.variable_scope('layer_%d' % layer):
now_out = self._train_layer_call(outputs[-1], input_mask, self._step_memory, self._step_mem_mask)
outputs.append(now_out)
return outputs[1:] if all_layer else outputs[-1]
def _step_self_att_block(self, inputs, input_before):
with tf.variable_scope('self_att'):
att_res = multi_head_attention(inputs, input_before, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=None, keep_prob=self._kprob, scope='self_attention')
# att_res = dense(att_res, self._hidden, scope='compress')
att_res = dropout(att_res, self._kprob, self._is_train)
att_res = layer_norm(att_res + inputs, 'att')
return att_res
def _step_memory_att_block(self, att_res, memory, mem_mask, **kwargs):
with tf.variable_scope('mem_att'):
enc_att, prob = multi_head_attention(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mem_mask, keep_prob=self._kprob, scope='attention', is_prob=True)
self._att_prob = prob
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
return enc_att
def _step_ffd_block(self, enc_att):
with tf.variable_scope('ffd'):
res = self._ffd_fn(dense(enc_att, self._ffd_hidden, scope='ffd_w0'))
res = dense(res, self._hidden, scope='ffd_w1')
res = dropout(res, self._kprob, self._is_train)
res = layer_norm(res + enc_att, scope='ffd')
return res
def _layer_step(self, inputs, memory, mem_mask, input_before, **kwargs):
att_res = self._step_self_att_block(inputs, input_before)
enc_att = self._step_memory_att_block(att_res, memory, mem_mask, **kwargs)
res = self._step_ffd_block(enc_att)
return res
def step(self, single_input, before_input, all_layer=False, reuse=True):
"""
        before_input is a dict that records the previous outputs of every layer.
"""
with tf.variable_scope(self._scope, reuse=reuse):
hidden = single_input.shape.as_list()[-1]
if self._hidden != hidden:
single_input = self._ffd_fn(dense(single_input, self._hidden, use_bias=False,
scope='input_proj', reuse=reuse))
before_input['layer_-1'] = tf.cond(
before_input['is_start'], lambda: single_input,
lambda: tf.concat([before_input['layer_-1'], single_input], axis=1)
)
outputs = [single_input]
for layer in range(self._layer):
now_key = 'layer_{}'.format(layer)
pre_key = 'layer_{}'.format(layer - 1)
with tf.variable_scope('layer_%d' % layer):
now_out = self._layer_step(outputs[-1], self._step_memory, self._step_mem_mask,
before_input[pre_key])
outputs.append(now_out)
before_input[now_key] = tf.cond(
before_input['is_start'], lambda: now_out,
lambda: tf.concat([before_input[now_key], now_out], axis=1)
)
before_input['is_start'] = tf.constant(False)
return outputs[1:] if all_layer else outputs[-1], before_input
class TransformerDecoderCoverage(TransformerDecoder):
def __init__(self, hidden, layers, heads, ffd_hidden, ffd_fn=None, keep_prob=1.0,
is_train=None, scope='transformer_decoder'):
super(TransformerDecoderCoverage, self).__init__(hidden, layers, heads, ffd_hidden, ffd_fn, keep_prob,
is_train, scope)
self._collect_loss = tf.constant(0.0, dtype=tf.float32)
@property
def loss(self):
return self._collect_loss
@property
def before_input_shape(self):
before_shape = {'layer_{}_output'.format(ix): tf.TensorShape([None, None, None])
for ix in range(-1, self._layer)}
before_shape.update(
{'layer_{}_coverage'.format(ix): tf.TensorShape([None, self._heads, None, None])
for ix in range(0, self._layer)}
)
before_shape['is_start'] = tf.TensorShape([])
return before_shape
@property
def before_init(self):
before = {'layer_{}_output'.format(ix): tf.zeros((self._batch, 1, self._hidden), dtype=tf.float32)
for ix in range(-1, self._layer)}
before.update({
'layer_{}_coverage'.format(ix): tf.zeros((self._batch, self._heads, 1, 1), dtype=tf.float32)
for ix in range(0, self._layer)
})
before['is_start'] = tf.constant(True)
return before
def _train_memory_att_block(self, att_res, memory, mem_mask):
with tf.variable_scope('mem_att'):
enc_att, _, loss = multi_head_attention_with_coverage(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mem_mask, keep_prob=self._kprob, scope='attention')
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
self._collect_loss += loss
return enc_att
def _step_memory_att_block(self, att_res, memory, mem_mask, coverage_tile_now):
with tf.variable_scope('mem_att'):
enc_att, coverage_tile_now, _ = multi_head_attention_with_coverage(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mem_mask, keep_prob=self._kprob, scope='attention', coverage=coverage_tile_now,
)
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
return enc_att, coverage_tile_now
def _layer_step_coverage(self, inputs, memory, mem_mask, input_before, coverage_tile_now):
att_res = self._step_self_att_block(inputs, input_before)
enc_att, coverage_tile_now = self._step_memory_att_block(att_res, memory, mem_mask,
coverage_tile_now=coverage_tile_now)
res = self._step_ffd_block(enc_att)
return res, coverage_tile_now
def step(self, single_input, before_input, all_layer=False, reuse=True):
"""
        before_input is a dict that records the previous outputs of every layer.
"""
with tf.variable_scope(self._scope, reuse=reuse):
hidden = single_input.shape.as_list()[-1]
if self._hidden != hidden:
single_input = self._ffd_fn(dense(single_input, self._hidden, use_bias=False,
scope='input_proj', reuse=reuse))
before_input['layer_-1_output'] = tf.cond(
before_input['is_start'], lambda: single_input,
lambda: tf.concat([before_input['layer_-1_output'], single_input], axis=1)
)
mem_length = tf.shape(self._step_memory)[1]
before_input['layer_0_coverage'] = tf.cond(
before_input['is_start'], lambda: tf.zeros((self._batch, self._heads, mem_length, 1), dtype=tf.float32),
lambda: before_input['layer_0_coverage']
)
outputs = [single_input]
for layer in range(self._layer):
now_key = 'layer_{}_'.format(layer)
pre_key = 'layer_{}_'.format(layer - 1)
with tf.variable_scope('layer_{}'.format(layer)):
now_out, coverage = self._layer_step_coverage(
outputs[-1], self._step_memory, self._step_mem_mask,
before_input[pre_key + 'output'], before_input[now_key + 'coverage']
)
outputs.append(now_out)
before_input[now_key + 'output'] = tf.cond(
before_input['is_start'], lambda: now_out,
lambda: tf.concat([before_input[now_key + 'output'], now_out], axis=1)
)
before_input[now_key + 'coverage'] = coverage
before_input['is_start'] = tf.constant(False)
return outputs[1:] if all_layer else outputs[-1], before_input
class TransformerDecoderCLow(TransformerDecoderCoverage):
def _train_memory_att_block(self, att_res, memory, mem_mask):
with tf.variable_scope('mem_att'):
enc_att, _, loss = multi_head_attention_coverage_low(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mem_mask, keep_prob=self._kprob, scope='attention')
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
self._collect_loss += loss
return enc_att
def _step_memory_att_block(self, att_res, memory, mem_mask, coverage_tile_now):
with tf.variable_scope('mem_att'):
enc_att, coverage_tile_now, _ = multi_head_attention_coverage_low(
att_res, memory, self._heads, self._att_hidden, is_train=self._is_train,
mem_mask=mem_mask, keep_prob=self._kprob, scope='attention', coverage=coverage_tile_now,
)
# enc_att = dense(enc_att, self._hidden, scope='compress')
enc_att = dropout(enc_att, self._kprob, self._is_train)
enc_att = layer_norm(enc_att + att_res, 'enc_att')
return enc_att, coverage_tile_now
def convolution(inputs, output_size, bias=True, kernel_size=1, mode='SAME', scope="conv"):
with tf.variable_scope(scope):
shapes = inputs.shape.as_list()
if len(shapes) > 4:
raise NotImplementedError
elif len(shapes) == 4:
filter_shape = [1, kernel_size, shapes[-1], output_size]
bias_shape = [1, 1, 1, output_size]
strides = [1, 1, 1, 1]
else:
filter_shape = [kernel_size, shapes[-1], output_size]
bias_shape = [1, 1, output_size]
strides = 1
conv_func = tf.nn.conv1d if len(shapes) == 3 else tf.nn.conv2d
kernel_ = tf.get_variable("kernel_", filter_shape, dtype=tf.float32, )
outputs = conv_func(inputs, kernel_, strides, mode)
if bias:
outputs += tf.get_variable("bias_", bias_shape, initializer=tf.zeros_initializer())
return outputs
def dropout(inputs, keep_prob, is_train):
if keep_prob < 1.0:
inputs = tf.cond(is_train, lambda: tf.nn.dropout(inputs, keep_prob), lambda: inputs)
return inputs
def softmax_mask(val, mask):
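    # Push masked-out positions (mask == 0) toward -INF so they receive ~0 probability after softmax.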
return -INF * (1 - tf.cast(mask, tf.float32)) + val
def summ(memory, hidden, mask, keep_prob=1.0, is_train=None, scope="summ", is_prob=False):
with tf.variable_scope(scope):
d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
s0 = tf.nn.tanh(dense(d_memory, hidden, scope="s0"))
s = dense(s0, 1, use_bias=False, scope="s")
s1 = softmax_mask(tf.squeeze(s, [2]), mask)
a = tf.expand_dims(tf.nn.softmax(s1), axis=2)
res = tf.reduce_sum(a * memory, axis=1)
if not is_prob:
return res
else:
return res, a
def dot_attention(inputs, memory, mask, hidden, keep_prob=1.0, is_train=None, scope="dot_attention", is_prob=False):
with tf.variable_scope(scope):
d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train)
d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
JX = tf.shape(inputs)[1]
with tf.variable_scope("attention"):
inputs_ = tf.nn.relu(
dense(d_inputs, hidden, use_bias=False, scope="inputs"))
memory_ = tf.nn.relu(
dense(d_memory, hidden, use_bias=False, scope="memory"))
outputs = tf.matmul(inputs_, tf.transpose(
memory_, [0, 2, 1])) / (hidden ** 0.5)
if mask is not None:
mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])
outputs = softmax_mask(outputs, mask)
logits = tf.nn.softmax(outputs)
outputs = tf.matmul(logits, memory)
return outputs, logits
# res = tf.concat([inputs, outputs], axis=2)
# with tf.variable_scope("gate"):
# dim = res.get_shape().as_list()[-1]
# d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
# gate = tf.nn.sigmoid(dense(d_res, dim, use_bias=False))
# return res * gate
def dense(inputs, hidden, use_bias=True, scope="dense", reuse=False):
return tf.layers.dense(inputs, hidden, use_bias=use_bias, name=scope, reuse=reuse)
def fusion(old, new, hidden_size, name):
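    # Gated fusion: mix the old and new representations through a learned sigmoid gate
    # computed from the concatenation [old, new, old*new, old-new].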
    # concatenate features
tmp = tf.concat([old, new, old*new, old-new], axis=2) # b, len, hidden*4
    # activation
new_sens_tanh = tf.nn.tanh(dense(tmp, hidden_size*2, scope=name))
# gate
gate = tf.nn.sigmoid(dense(tmp, 1, scope=name+"sigmoid"))
outputs = gate*new_sens_tanh + (1-gate)*old
return outputs
def mask_softmax(x, mask, dim=-1):
"""for reweight the softmax result. prevent the padding weights.
assume the softmax is conducted on the last dimension
Args:
x: [batch, *, len]
mask: [batch, len]
Return:
[bs, *, len]
"""
# mask = mask.float()
# if mask.dim() < x.dim():
# mask = mask.unsqueeze(1)
# result = tf.nn.softmax(x * mask, dim=dim)
# result = result * mask
# result = result / (result.sum(dim=dim, keepdim=True) + 1e-13)
## return result.view(*x.size())
# if tf.rank(mask) < tf.rank(x):
mask = tf.expand_dims(mask, 1)
mask = tf.to_float(mask)
result = tf.nn.softmax(x * mask, dim=dim)
result = result * mask
result = result / (tf.reduce_sum(result, axis=dim, keep_dims=True) + 1e-13)
    # keep_dims: whether to keep the reduced dimension; if False, the result loses one dimension after the reduction.
return result
def batch_norm(x, is_train, scope='batch_norm'):
with tf.variable_scope(scope):
res = tf.layers.batch_normalization(x, training=is_train)
return res
def layer_norm(x, scope='', reuse=False, epsilon=1e-6):
# return tf.contrib.layers.layer_norm(
# inputs=x, begin_norm_axis=-1, begin_params_axis=-1, scope='layer_norm_' + scope, reuse=reuse)
filters = x.shape.as_list()[-1]
with tf.variable_scope('layer_norm_' + scope, reuse=reuse):
scale = tf.get_variable(
"scale", [filters], initializer=tf.ones_initializer())
bias = | |
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = CosDownloadInfo()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeGroupInstancesRequest(AbstractModel):
"""DescribeGroupInstances请求参数结构体
"""
def __init__(self):
"""
:param GroupId: 部署组ID
:type GroupId: str
:param SearchWord: 搜索字段
:type SearchWord: str
:param OrderBy: 排序字段
:type OrderBy: str
:param OrderType: 排序类型
:type OrderType: int
:param Offset: 偏移量
:type Offset: int
:param Limit: 分页个数
:type Limit: int
"""
self.GroupId = None
self.SearchWord = None
self.OrderBy = None
self.OrderType = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.SearchWord = params.get("SearchWord")
self.OrderBy = params.get("OrderBy")
self.OrderType = params.get("OrderType")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeGroupInstancesResponse(AbstractModel):
"""DescribeGroupInstances返回参数结构体
"""
def __init__(self):
"""
:param Result: 部署组机器信息
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.TsfPageInstance`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = TsfPageInstance()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeGroupRequest(AbstractModel):
"""DescribeGroup请求参数结构体
"""
def __init__(self):
"""
:param GroupId: 部署组ID
:type GroupId: str
"""
self.GroupId = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
class DescribeGroupResponse(AbstractModel):
"""DescribeGroup返回参数结构体
"""
def __init__(self):
"""
:param Result: 虚拟机部署组详情
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.VmGroup`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = VmGroup()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeGroupsRequest(AbstractModel):
"""DescribeGroups请求参数结构体
"""
def __init__(self):
"""
:param SearchWord: 搜索字段
:type SearchWord: str
:param ApplicationId: 应用ID
:type ApplicationId: str
:param OrderBy: 排序字段
:type OrderBy: str
:param OrderType: 排序方式
:type OrderType: int
:param Offset: 偏移量
:type Offset: int
:param Limit: 分页个数
:type Limit: int
:param NamespaceId: 命名空间ID
:type NamespaceId: str
:param ClusterId: 集群ID
:type ClusterId: str
:param GroupResourceTypeList: 部署组资源类型列表
:type GroupResourceTypeList: list of str
"""
self.SearchWord = None
self.ApplicationId = None
self.OrderBy = None
self.OrderType = None
self.Offset = None
self.Limit = None
self.NamespaceId = None
self.ClusterId = None
self.GroupResourceTypeList = None
def _deserialize(self, params):
self.SearchWord = params.get("SearchWord")
self.ApplicationId = params.get("ApplicationId")
self.OrderBy = params.get("OrderBy")
self.OrderType = params.get("OrderType")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.NamespaceId = params.get("NamespaceId")
self.ClusterId = params.get("ClusterId")
self.GroupResourceTypeList = params.get("GroupResourceTypeList")
class DescribeGroupsResponse(AbstractModel):
"""DescribeGroups返回参数结构体
"""
def __init__(self):
"""
:param Result: 虚拟机部署组分页信息
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.TsfPageVmGroup`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = TsfPageVmGroup()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeImageTagsRequest(AbstractModel):
"""DescribeImageTags请求参数结构体
"""
def __init__(self):
"""
:param ApplicationId: 应用Id
:type ApplicationId: str
:param Offset: 偏移量,取值从0开始
:type Offset: int
:param Limit: 分页个数,默认为20, 取值应为1~100
:type Limit: int
:param QueryImageIdFlag: 不填和0:查询 1:不查询
:type QueryImageIdFlag: int
:param SearchWord: 可用于搜索的 tag 名字
:type SearchWord: str
"""
self.ApplicationId = None
self.Offset = None
self.Limit = None
self.QueryImageIdFlag = None
self.SearchWord = None
def _deserialize(self, params):
self.ApplicationId = params.get("ApplicationId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.QueryImageIdFlag = params.get("QueryImageIdFlag")
self.SearchWord = params.get("SearchWord")
class DescribeImageTagsResponse(AbstractModel):
"""DescribeImageTags返回参数结构体
"""
def __init__(self):
"""
:param Result: 查询的权限数据对象
:type Result: :class:`tencentcloud.tsf.v20180326.models.ImageTagsResult`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = ImageTagsResult()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeLaneRulesRequest(AbstractModel):
"""DescribeLaneRules请求参数结构体
"""
def __init__(self):
"""
:param Limit: 每页展示的条数
:type Limit: int
:param Offset: 翻页偏移量
:type Offset: int
:param SearchWord: 搜索关键词
:type SearchWord: str
:param RuleId: 泳道规则ID(用于精确搜索)
:type RuleId: str
"""
self.Limit = None
self.Offset = None
self.SearchWord = None
self.RuleId = None
def _deserialize(self, params):
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.SearchWord = params.get("SearchWord")
self.RuleId = params.get("RuleId")
class DescribeLaneRulesResponse(AbstractModel):
"""DescribeLaneRules返回参数结构体
"""
def __init__(self):
"""
:param Result: 泳道规则列表
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.LaneRules`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = LaneRules()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeLanesRequest(AbstractModel):
"""DescribeLanes请求参数结构体
"""
def __init__(self):
"""
:param Limit: 每页展示的条数
:type Limit: int
:param Offset: 翻页偏移量
:type Offset: int
:param SearchWord: 搜索关键字
:type SearchWord: str
"""
self.Limit = None
self.Offset = None
self.SearchWord = None
def _deserialize(self, params):
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
self.SearchWord = params.get("SearchWord")
class DescribeLanesResponse(AbstractModel):
"""DescribeLanes返回参数结构体
"""
def __init__(self):
"""
:param Result: 泳道列表
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.LaneInfos`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = LaneInfos()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeMicroserviceRequest(AbstractModel):
"""DescribeMicroservice请求参数结构体
"""
def __init__(self):
"""
:param MicroserviceId: 微服务ID
:type MicroserviceId: str
:param Offset: 偏移量
:type Offset: int
:param Limit: 分页个数
:type Limit: int
"""
self.MicroserviceId = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.MicroserviceId = params.get("MicroserviceId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeMicroserviceResponse(AbstractModel):
"""DescribeMicroservice返回参数结构体
"""
def __init__(self):
"""
:param Result: 微服务详情实例列表
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.TsfPageMsInstance`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = TsfPageMsInstance()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeMicroservicesRequest(AbstractModel):
"""DescribeMicroservices请求参数结构体
"""
def __init__(self):
"""
:param NamespaceId: 命名空间ID
:type NamespaceId: str
:param SearchWord: 搜索字段
:type SearchWord: str
:param OrderBy: 排序字段
:type OrderBy: str
:param OrderType: 排序类型
:type OrderType: int
:param Offset: 偏移量
:type Offset: int
:param Limit: 分页个数
:type Limit: int
"""
self.NamespaceId = None
self.SearchWord = None
self.OrderBy = None
self.OrderType = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.NamespaceId = params.get("NamespaceId")
self.SearchWord = params.get("SearchWord")
self.OrderBy = params.get("OrderBy")
self.OrderType = params.get("OrderType")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeMicroservicesResponse(AbstractModel):
"""DescribeMicroservices返回参数结构体
"""
def __init__(self):
"""
:param Result: 微服务分页列表信息
注意:此字段可能返回 null,表示取不到有效值。
:type Result: :class:`tencentcloud.tsf.v20180326.models.TsfPageMicroservice`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = TsfPageMicroservice()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribeMsApiListRequest(AbstractModel):
"""DescribeMsApiList请求参数结构体
"""
def __init__(self):
"""
:param MicroserviceId: 微服务ID
:type MicroserviceId: str
:param SearchWord: 搜索关键字
:type SearchWord: str
:param Limit: 每页的数量
:type Limit: int
:param Offset: 翻页偏移量
:type Offset: int
"""
self.MicroserviceId = None
self.SearchWord = None
self.Limit = None
self.Offset = None
def _deserialize(self, params):
self.MicroserviceId = params.get("MicroserviceId")
self.SearchWord = params.get("SearchWord")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
class DescribeMsApiListResponse(AbstractModel):
"""DescribeMsApiList返回参数结构体
"""
def __init__(self):
"""
:param Result: 相应结果
:type Result: :class:`tencentcloud.tsf.v20180326.models.TsfApiListResponse`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = TsfApiListResponse()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribePkgsRequest(AbstractModel):
"""DescribePkgs请求参数结构体
"""
def __init__(self):
"""
:param ApplicationId: 应用ID(只传入应用ID,返回该应用下所有软件包信息)
:type ApplicationId: str
:param SearchWord: 查询关键字(支持根据包ID,包名,包版本号搜索)
:type SearchWord: str
:param OrderBy: 排序关键字(默认为"UploadTime":上传时间)
:type OrderBy: str
:param OrderType: 升序:0/降序:1(默认降序)
:type OrderType: int
:param Offset: 查询起始偏移
:type Offset: int
:param Limit: 返回数量限制
:type Limit: int
:param RepositoryType: 程序包仓库类型
:type RepositoryType: str
:param RepositoryId: 程序包仓库id
:type RepositoryId: str
"""
self.ApplicationId = None
self.SearchWord = None
self.OrderBy = None
self.OrderType = None
self.Offset = None
self.Limit = None
self.RepositoryType = None
self.RepositoryId = None
def _deserialize(self, params):
self.ApplicationId = params.get("ApplicationId")
self.SearchWord = params.get("SearchWord")
self.OrderBy = params.get("OrderBy")
self.OrderType = params.get("OrderType")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.RepositoryType = params.get("RepositoryType")
self.RepositoryId = params.get("RepositoryId")
class DescribePkgsResponse(AbstractModel):
"""DescribePkgs返回参数结构体
"""
def __init__(self):
"""
:param Result: 符合查询程序包信息列表
:type Result: :class:`tencentcloud.tsf.v20180326.models.PkgList`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = PkgList()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribePodInstancesRequest(AbstractModel):
"""DescribePodInstances请求参数结构体
"""
def __init__(self):
"""
:param GroupId: 实例所属groupId
:type GroupId: str
:param Offset: 偏移量,取值从0开始
:type Offset: int
:param Limit: 分页个数,默认为20, 取值应为1~50
:type Limit: int
"""
self.GroupId = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribePodInstancesResponse(AbstractModel):
"""DescribePodInstances返回参数结构体
"""
def __init__(self):
"""
        :param Result: Queried permission data object
        :type Result: :class:`tencentcloud.tsf.v20180326.models.GroupPodResult`
        :param RequestId: The unique request ID, which is returned for each request. RequestId is required for locating a problem.
:type RequestId: str
"""
self.Result = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Result") is not None:
self.Result = GroupPodResult()
self.Result._deserialize(params.get("Result"))
self.RequestId = params.get("RequestId")
class DescribePublicConfigReleaseLogsRequest(AbstractModel):
"""DescribePublicConfigReleaseLogs请求参数结构体
"""
def __init__(self):
"""
        :param NamespaceId: Namespace ID; if not passed in, the full list is queried
        :type NamespaceId: str
        :param Offset: Offset, 0 by default
        :type Offset: int
        :param Limit: Number of items per page, 20 by default
        :type Limit: int
"""
self.NamespaceId = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.NamespaceId = params.get("NamespaceId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribePublicConfigReleaseLogsResponse(AbstractModel):
"""DescribePublicConfigReleaseLogs返回参数结构体
r"""
EXAMPLES::
sage: from sage.schemes.toric.chow_group import *
sage: P2=toric_varieties.P2()
sage: A = ChowGroup_class(P2,ZZ,True); A
Chow group of 2-d CPR-Fano toric variety covered by 3 affine patches
sage: is_ChowGroup(A)
True
sage: is_ChowCycle(A.an_element())
True
TESTS::
sage: A_ZZ = P2.Chow_group()
sage: 2 * A_ZZ.an_element() * 3
( 6 | 0 | 0 )
sage: 1/2 * A_ZZ.an_element() * 1/3
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for '*': 'Rational Field'
and 'Chow group of 2-d CPR-Fano toric variety covered by 3 affine patches'
sage: A_ZZ.get_action(ZZ)
Right scalar multiplication by Integer Ring on Chow group of 2-d
CPR-Fano toric variety covered by 3 affine patches
sage: A_ZZ.get_action(QQ)
You can't multiply integer classes with fractional
numbers. For that you need to go to the rational Chow group::
sage: A_QQ = P2.Chow_group(QQ)
sage: 2 * A_QQ.an_element() * 3
( 0 | 0 | 6 )
sage: 1/2 * A_QQ.an_element() * 1/3
( 0 | 0 | 1/6 )
sage: A_QQ.get_action(ZZ)
Right scalar multiplication by Integer Ring on QQ-Chow group of 2-d
CPR-Fano toric variety covered by 3 affine patches
sage: A_QQ.get_action(QQ)
Right scalar multiplication by Rational Field on QQ-Chow group of 2-d
CPR-Fano toric variety covered by 3 affine patches
"""
self._variety = toric_variety
# cones are automatically sorted by dimension
self._cones = flatten( toric_variety.fan().cones() )
V = FreeModule(base_ring, len(self._cones))
W = self._rational_equivalence_relations(V)
super(ChowGroup_class,self).__init__(V, W, check)
def scheme(self):
r"""
Return the underlying toric variety.
OUTPUT:
A :class:`ToricVariety
<sage.schemes.toric.variety.ToricVariety_field>`.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: A = P2.Chow_group()
sage: A.scheme()
2-d CPR-Fano toric variety covered by 3 affine patches
sage: A.scheme() is P2
True
"""
return self._variety
def _element_constructor_(self, x, check=True):
r"""
Construct a :class:`ChowCycle`.
INPUT:
- ``x`` -- a cone of the fan, a toric divisor, or a valid
input for
          :class:`sage.modules.fg_pid.fgp_module.FGP_Module_class`.
- ``check`` -- bool (default: ``True``). See
          :class:`sage.modules.fg_pid.fgp_module.FGP_Module_class`.
EXAMPLES::
sage: dP6 = toric_varieties.dP6()
sage: A = dP6.Chow_group()
sage: cone = dP6.fan(dim=1)[4]
sage: A(cone)
( 0 | 0, 1, 0, 0 | 0 )
sage: A(Cone(cone)) # isomorphic but not identical to a cone of the fan!
( 0 | 0, 1, 0, 0 | 0 )
sage: A( dP6.K() )
( 0 | -1, -2, -2, -1 | 0 )
"""
fan = self._variety.fan()
if is_Cone(x):
cone = fan.embed(x)
return self.element_class(self, self._cone_to_V(cone), False)
if is_ToricDivisor(x):
v = sum(x.coefficient(i)*self._cone_to_V(onecone)
for i,onecone in enumerate(fan(1)))
return self.element_class(self, v, False)
return super(ChowGroup_class,self)._element_constructor_(x, check)
def _coerce_map_from_(self, S):
"""
Return true if S canonically coerces to self.
EXAMPLES::
sage: A = toric_varieties.P2().Chow_group()
sage: A._coerce_map_from_(ZZ) # private method
False
sage: A.has_coerce_map_from(ZZ) # recommended usage
False
"""
# We might want to coerce Cone_of_fans into ChowCycles
# but cones don't have parents at the moment.
return super(ChowGroup_class,self)._coerce_map_from_(S)
def _rational_equivalence_relations(self, V):
r"""
Return the rational equivalence relations between the cones of the fan.
See :meth:`relation_gens` for details.
EXAMPLES::
sage: points_mod = lambda k: matrix([[ 1, 1, 2*k+1],[ 1,-1, 1],[-1, 1, 1],[-1,-1, 1],[-1,-1,-1],[-1, 1,-1],[ 1,-1,-1],[ 1, 1,-1]])
sage: points = lambda k: matrix([[1,1,1],[1,-1,1],[-1,1,1]]).solve_left(points_mod(k)).rows()
sage: cones = [[0,1,2,3],[4,5,6,7],[0,1,7,6],[4,5,3,2],[0,2,5,7],[4,6,1,3]]
sage: X_Delta = lambda k: ToricVariety( Fan(cones=cones, rays=points(k)) )
sage: from sage.schemes.toric.chow_group import ChowGroup
sage: A = ChowGroup( X_Delta(2) )
sage: rel = A._rational_equivalence_relations(A.cover()).basis()
sage: matrix(rel).submatrix(col=0, ncols=1).elementary_divisors()
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
sage: matrix(rel).submatrix(col=1, ncols=8).elementary_divisors()
[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
sage: matrix(rel).submatrix(col=9, ncols=12).elementary_divisors()
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
sage: matrix(rel).submatrix(col=21, ncols=6).elementary_divisors()
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
"""
fan = self._variety.fan()
dim = self._variety.dimension()
relations = []
for rho in self._cones:
for u in rho.orthogonal_sublattice().gens():
rel = V.zero()
for sigma in rho.facet_of():
sigma_idx = self._cones.index(sigma)
Q = sigma.relative_quotient(rho)
for v in [n.lift() for n in Q.gens()]:
rel += (u*v) * V.gen(sigma_idx)
relations.append(rel)
return V.span(relations)
def __div__(self, other):
r"""
Return the quotient of the Chow group by a subgroup.
OUTPUT:
Currently not implemented.
EXAMPLES::
sage: A = toric_varieties.dP6().Chow_group()
sage: Asub = A.submodule([ A.gen(0), A.gen(3) ])
sage: A/Asub
Traceback (most recent call last):
...
NotImplementedError: Quotients of the Chow group are not implemented.
"""
        raise NotImplementedError('Quotients of the Chow group are not implemented.')
def _repr_(self):
"""
Return a string representation.
EXAMPLES::
sage: P2=toric_varieties.P2()
sage: from sage.schemes.toric.chow_group import ChowGroup
sage: ChowGroup(P2,ZZ)._repr_()
'Chow group of 2-d CPR-Fano toric variety covered by 3 affine patches'
sage: ChowGroup(P2,QQ)._repr_()
'QQ-Chow group of 2-d CPR-Fano toric variety covered by 3 affine patches'
"""
if self.base_ring() == QQ:
return "QQ-Chow group of " + str(self._variety)
elif self.base_ring() == ZZ:
return "Chow group of " + str(self._variety)
else:
            raise ValueError('Base ring must be QQ or ZZ.')
def __eq__(self, other):
r"""
Comparison of two Chow groups.
INPUT:
- ``other`` -- anything.
OUTPUT:
``True`` or ``False``.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: P2.Chow_group() == P2.Chow_group()
True
sage: P2.Chow_group(ZZ) == P2.Chow_group(QQ)
False
"""
return self is other # ChowGroup_class is unique
def _cone_to_V(self, cone):
r"""
Convert a cone into the corresponding vector in ``self._V``
INPUT:
- ``cone`` -- a :class:`sage.geometry.cone.ConvexRationalPolyhedralCone`.
        OUTPUT:
The corresponding element of ``self.V()``.
EXAMPLES::
sage: P2 = toric_varieties.P2()
sage: A = P2.Chow_group()
sage: cone = P2.fan(dim=1)[0]
sage: A._cone_to_V(cone)
(0, 1, 0, 0, 0, 0, 0)
"""
assert cone.ambient() is self._variety.fan()
x = [0] * len(self._cones)
x[self._cones.index(cone)] = 1
return self._V(x)
def degree(self, k=None):
r"""
Return the degree-`k` Chow group.
INPUT:
- ``k`` -- an integer or ``None`` (default). The degree of the
Chow group.
OUTPUT:
- if `k` was specified, the Chow group `A_k` as an Abelian
group.
- if `k` was not specified, a tuple containing the Chow groups
in all degrees.
.. NOTE::
* For a smooth toric variety, this is the same as the
Poincare-dual cohomology group
`H^{d-2k}(X,\ZZ)`.
* For a simplicial toric variety ("orbifold"),
`A_k(X)\otimes \QQ = H^{d-2k}(X,\QQ)`.
EXAMPLES:
Four exercises from page 65 of [FultonP65]_. First, an example
with `A_1(X)=\ZZ\oplus\ZZ/3\ZZ`::
sage: X = ToricVariety(Fan(cones=[[0,1],[1,2],[2,0]],
... rays=[[2,-1],[-1,2],[-1,-1]]))
sage: A = X.Chow_group()
sage: A.degree(1)
C3 x Z
Second, an example with `A_2(X)=\ZZ^2`::
sage: points = [[1,0,0],[0,1,0],[0,0,1],[1,-1,1],[-1,0,-1]]
sage: l = LatticePolytope(matrix(points).transpose())
sage: l.show3d()
sage: X = ToricVariety(FaceFan(l))
sage: A = X.Chow_group()
sage: A.degree(2)
Z^2
Third, an example with `A_2(X)=\ZZ^5`::
sage: cube = [[ 1,0,0],[0, 1,0],[0,0, 1],[-1, 1, 1],
... [-1,0,0],[0,-1,0],[0,0,-1],[ 1,-1,-1]]
sage: lat_cube = LatticePolytope(matrix(cube).transpose())
sage: X = ToricVariety(FaceFan((LatticePolytope(lat_cube))))
sage: X.Chow_group().degree(2)
Z^5
Fourth, a fan that is not the fan over a
        polytope. Combinatorially, the fan is the same as in the third
example, only the coordinates of the first point are
different. But the resulting fan is not the face fan of a
cube, so the variety is "more singular". Its Chow group has
torsion, `A_2(X)=\ZZ^5 \oplus \ZZ/2`::
sage: rays = [[ 1, 2, 3],[ 1,-1, 1],[-1, 1, 1],[-1,-1, 1],
... [-1,-1,-1],[-1, 1,-1],[ 1,-1,-1],[ 1, 1,-1]]
sage: cones = [[0,1,2,3],[4,5,6,7],[0,1,7,6],
... [4,5,3,2],[0,2,5,7],[4,6,1,3]]
sage: X = ToricVariety(Fan(cones, rays))
sage: X.Chow_group().degree(2) # long time (2s on sage.math, 2011)
C2 x Z^5
Finally, Example 1.3 of [FS]_::
sage: points_mod = lambda k: matrix([[ 1, 1, 2*k+1],[ 1,-1, 1],
... [-1, 1, 1],[-1,-1, 1],[-1,-1,-1],
... [-1, 1,-1],[ 1,-1,-1],[ 1, 1,-1]])
sage: rays = lambda k: matrix([[1,1,1],[1,-1,1],[-1,1,1]]
... ).solve_left(points_mod(k)).rows()
sage: cones = [[0,1,2,3],[4,5,6,7],[0,1,7,6],
... [4,5,3,2],[0,2,5,7],[4,6,1,3]]
sage: X_Delta = lambda k: ToricVariety(Fan(cones=cones, rays=rays(k)))
sage: X_Delta(0).Chow_group().degree() # long time (3s on sage.math, 2011)
(Z, Z, Z^5, Z)
sage: X_Delta(1).Chow_group().degree() # long time (3s on sage.math, 2011)
(Z, 0, Z^5, Z)
sage: X_Delta(2).Chow_group().degree() # long time (3s on sage.math, 2011)
(Z, C2, Z^5, Z)
sage: X_Delta(2).Chow_group(base_ring=QQ).degree() # long time (4s on sage.math, 2011)
(Q, 0, Q^5, Q)
"""
        if k is not None:
return self.degree()[k]
try:
return self._degree
except AttributeError:
pass
self._degree = tuple(ChowGroup_degree_class(self,d)
for d in range(0,self._variety.dimension()+1))
return self._degree
def coordinate_vector(self, chow_cycle, degree=None, reduce=True):
r"""
Return the coordinate vector of the ``chow_cycle``.
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
from ralph_scrooge.tests import ScroogeTestCase
from ralph_scrooge.models import (
Environment,
Service,
ServiceEnvironment,
)
from ralph_scrooge.models import (
Team,
TeamManager,
TeamServiceEnvironmentPercent,
)
from ralph_scrooge.tests.utils.factory import (
ServiceEnvironmentFactory,
TeamFactory,
)
class TestTeamTimeDivision(ScroogeTestCase):
def setUp(self):
superuser = get_user_model().objects.create_superuser(
'test', '<EMAIL>', '<PASSWORD>'
)
self.client = APIClient()
self.client.force_authenticate(superuser)
self.date = datetime.date(2016, 9, 8)
self.service_environment1 = ServiceEnvironmentFactory()
self.service_environment2 = ServiceEnvironmentFactory()
self.service_environment3 = ServiceEnvironmentFactory()
self.service1_uid = self.service_environment1.service.ci_uid
self.service2_uid = self.service_environment2.service.ci_uid
self.service3_uid = self.service_environment3.service.ci_uid
self.service1_env_name = self.service_environment1.environment.name
self.service2_env_name = self.service_environment2.environment.name
self.service3_env_name = self.service_environment3.environment.name
self.team = TeamFactory()
def test_if_uploaded_division_is_the_same_when_fetched_with_get(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 201)
resp = self.client.get(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
)
)
self.assertEquals(resp.status_code, 200)
received_response = json.loads(resp.content)
expected_response = division
# The order of returned objects depends on DB backend, so we have to
# manually sort them here before we compare them.
received_response['division'].sort(key=lambda d: d['service_uid'])
for i in range(len(expected_response['division'])):
self.assertEquals(
received_response['division'][i]['service_uid'],
expected_response['division'][i]['service_uid']
)
self.assertEquals(
received_response['division'][i]['environment'],
expected_response['division'][i]['environment']
)
self.assertEquals(
received_response['division'][i]['percent'],
expected_response['division'][i]['percent']
)
def test_for_error_when_service_env_does_not_exist(self):
self.assertEquals(
ServiceEnvironment.objects.filter(
service__ci_uid=self.service2_uid,
environment__name=self.service1_env_name,
).count(),
0
)
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
# Non-existing service environment (although such service
# and env exist).
"service_uid": self.service2_uid,
"environment": self.service1_env_name,
"percent": 40.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertIn('Service environment', resp.content)
self.assertIn('does not exist', resp.content)
self.assertIn(self.service2_uid, resp.content)
self.assertIn(self.service1_env_name, resp.content)
def test_for_error_when_percents_doesnt_sum_up_to_100(self):
# We are testing here *both* validation of `percent` field, and the
# correctness of saved values.
division_over_100 = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 30.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division_over_100),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
self.assertIn("Percents should sum to 100", resp.content)
self.assertIn(str(110.0), resp.content)
division_less_than_100 = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 30.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 30.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division_less_than_100),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
self.assertIn("Percents should sum to 100", resp.content)
self.assertIn(str(80.0), resp.content)
def test_for_success_when_percents_sum_up_to_100(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 201)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 3)
percent_total = 0
for tsep in TeamServiceEnvironmentPercent.objects.all():
percent_total += tsep.percent
self.assertEquals(percent_total, 100)
def test_for_error_when_team_does_not_exist(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
non_existing_team_id = 9999
self.assertFalse(
Team.objects.filter(id=non_existing_team_id).exists()
)
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': non_existing_team_id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 404)
self.assertIn(str(non_existing_team_id), resp.content)
self.assertIn("does not exist", resp.content)
def test_if_only_team_manager_can_upload_divisions(self):
regular_user = get_user_model().objects.create_user(
'test2', '<EMAIL>', 'test2'
)
self.client.force_authenticate(regular_user)
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
url = reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
)
payload = json.dumps(division)
resp = self.client.post(url, payload, content_type='application/json')
self.assertEquals(resp.status_code, 403)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
# Let's promote regular_user to Owner and then to TeamManager (we
# silently assume that all team managers are also owners - but not
# the other way around).
TeamManager.objects.create(team=self.team, manager=regular_user)
resp = self.client.post(url, payload, content_type='application/json')
self.assertEquals(resp.status_code, 201)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 3)
def test_if_only_team_manager_can_fetch_divisions(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
url = reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
)
# Let's upload initial data as a superuser.
resp = self.client.post(
url, json.dumps(division), content_type='application/json'
)
self.assertEquals(resp.status_code, 201)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 3)
# Then try to fetch this data as a regular user.
regular_user = get_user_model().objects.create_user(
'test2', '<EMAIL>', 'test2'
)
self.client.force_authenticate(regular_user)
resp = self.client.get(url)
self.assertEquals(resp.status_code, 403)
# Try again with regular_user promoted to TeamManager.
TeamManager.objects.create(team=self.team, manager=regular_user)
resp = self.client.get(url)
self.assertEquals(resp.status_code, 200)
def test_if_uploaded_division_overwrites_previous_one_for_same_team_and_date(self): # noqa
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 0)
division1 = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 40.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division1),
content_type='application/json',
)
self.assertEquals(resp.status_code, 201)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 2)
service_uids = set()
env_names = set()
percents = set()
for tsep in TeamServiceEnvironmentPercent.objects.all():
service_uids.add(tsep.service_environment.service.ci_uid)
env_names.add(tsep.service_environment.environment.name)
percents.add(tsep.percent)
self.assertEquals(service_uids, {self.service1_uid, self.service2_uid})
self.assertEquals(
env_names, {self.service1_env_name, self.service2_env_name}
)
self.assertEquals(percents, {60, 40})
division2 = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 70.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
"percent": 30.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division2),
content_type='application/json',
)
self.assertEquals(resp.status_code, 201)
self.assertEquals(TeamServiceEnvironmentPercent.objects.count(), 2)
service_uids = set()
env_names = set()
percents = set()
for tsep in TeamServiceEnvironmentPercent.objects.all():
service_uids.add(tsep.service_environment.service.ci_uid)
env_names.add(tsep.service_environment.environment.name)
percents.add(tsep.percent)
self.assertEquals(service_uids, {self.service1_uid, self.service3_uid})
self.assertEquals(
env_names, {self.service1_env_name, self.service3_env_name}
)
self.assertEquals(percents, {70, 30})
def test_for_error_when_service_does_not_exist(self):
non_existing_service_uid = 'fake_uid'
self.assertEquals(
Service.objects.filter(ci_uid=non_existing_service_uid).count(),
0
)
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": non_existing_service_uid,
"environment": self.service2_env_name,
"percent": 40.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
# Scrooge checks for the existence of a given service
# indirectly, i.e. by checking if service env exists - hence
# assertion below.
self.assertIn("Service environment", resp.content)
self.assertIn("does not exist", resp.content)
self.assertIn(non_existing_service_uid, resp.content)
def test_for_error_when_service_uid_is_missing(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"environment": self.service3_env_name,
"percent": 20.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertIn("service_uid", resp.content)
self.assertIn("field is required", resp.content)
def test_for_error_when_environment_is_missing(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"percent": 20.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertIn("environment", resp.content)
self.assertIn("field is required", resp.content)
def test_for_error_when_environment_does_not_exist(self):
non_existing_env_name = 'fake_uid'
self.assertEquals(
Environment.objects.filter(name=non_existing_env_name).count(),
0
)
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": non_existing_env_name,
"percent": 40.0,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
# Scrooge checks for the existence of a given environment
# indirectly, i.e. by checking if service env exists - hence
# assertion below.
self.assertIn("Service environment", resp.content)
self.assertIn("does not exist", resp.content)
self.assertIn(non_existing_env_name, resp.content)
def test_for_error_when_percent_is_missing(self):
division = {
"division": [
{
"service_uid": self.service1_uid,
"environment": self.service1_env_name,
"percent": 60.0,
},
{
"service_uid": self.service2_uid,
"environment": self.service2_env_name,
"percent": 20.0,
},
{
"service_uid": self.service3_uid,
"environment": self.service3_env_name,
}
]
}
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(division),
content_type='application/json',
)
self.assertEquals(resp.status_code, 400)
self.assertIn("percent", resp.content)
self.assertIn("field is required", resp.content)
def test_for_error_when_division_is_empty_list_or_none(self):
for d in [{'division': []}, {'division': None}]:
resp = self.client.post(
reverse(
'team_time_division',
kwargs={
'year': self.date.year,
'month': self.date.month,
'team_id': self.team.id,
},
),
json.dumps(d),
def _create(cls, lims, creation_tag=None, udfs=None, **kwargs):
"""Create an instance from attributes and return it"""
if not udfs:
udfs={}
instance = cls(lims, _create_new=True)
if creation_tag:
instance.root = ElementTree.Element(nsmap(cls._PREFIX + ':' + creation_tag))
elif cls._TAG:
instance.root = ElementTree.Element(nsmap(cls._PREFIX + ':' + cls._TAG))
else:
instance.root = ElementTree.Element(nsmap(cls._PREFIX + ':' + cls.__name__.lower()))
for key in udfs:
instance.udf[key]=udfs[key]
for attribute in kwargs:
if hasattr(instance, attribute):
setattr(instance, attribute, kwargs.get(attribute))
else:
raise TypeError("%s create: got an unexpected keyword argument '%s'" % (cls.__name__, attribute))
return instance
@classmethod
def create(cls, lims, creation_tag=None, **kwargs):
"""Create an instance from attributes then post it to the LIMS"""
instance = cls._create(lims, creation_tag=creation_tag, **kwargs)
data = lims.tostring(ElementTree.ElementTree(instance.root))
instance.root = lims.post(uri=lims.get_uri(cls._URI), data=data)
instance._uri = instance.root.attrib['uri']
return instance
class Instrument(Entity):
"""Lab Instrument
"""
_URI = "instruments"
_TAG = "instrument"
_PREFIX = "inst"
name = StringDescriptor('name')
type = StringDescriptor('type')
serial_number = StringDescriptor('serial-number')
expiry_date = StringDescriptor('expiry-date')
archived = BooleanDescriptor('archived')
class Lab(Entity):
"Lab; container of researchers."
_URI = 'labs'
_PREFIX = 'lab'
name = StringDescriptor('name')
billing_address = StringDictionaryDescriptor('billing-address')
shipping_address = StringDictionaryDescriptor('shipping-address')
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
externalids = ExternalidListDescriptor()
website = StringDescriptor('website')
class Researcher(Entity):
"Person; client scientist or lab personnel. Associated with a lab."
_URI = 'researchers'
_PREFIX = 'res'
first_name = StringDescriptor('first-name')
last_name = StringDescriptor('last-name')
phone = StringDescriptor('phone')
fax = StringDescriptor('fax')
email = StringDescriptor('email')
initials = StringDescriptor('initials')
lab = EntityDescriptor('lab', Lab)
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
externalids = ExternalidListDescriptor()
# credentials XXX
username = NestedStringDescriptor('username', 'credentials')
account_locked = NestedBooleanDescriptor('account-locked', 'credentials')
@property
def name(self):
return "%s %s" % (self.first_name, self.last_name)
class Permission(Entity):
"""A Clarity permission. Only supports GET"""
name = StringDescriptor('name')
action = StringDescriptor('action')
description = StringDescriptor('description')
class Role(Entity):
"""Clarity Role, hosting permissions"""
name = StringDescriptor('name')
researchers = NestedEntityListDescriptor('researcher', Researcher, 'researchers')
permissions = NestedEntityListDescriptor('permission', Permission, 'permissions')
class Reagent_label(Entity):
"""Reagent label element"""
reagent_label = StringDescriptor('reagent-label')
class Note(Entity):
"Note attached to a project or a sample."
content = StringDescriptor(None) # root element
class File(Entity):
"File attached to a project or a sample."
attached_to = StringDescriptor('attached-to')
content_location = StringDescriptor('content-location')
original_location = StringDescriptor('original-location')
is_published = BooleanDescriptor('is-published')
class Project(Entity):
"Project concerning a number of samples; associated with a researcher."
_URI = 'projects'
_TAG = 'project'
_PREFIX = 'prj'
name = StringDescriptor('name')
open_date = StringDescriptor('open-date')
close_date = StringDescriptor('close-date')
invoice_date = StringDescriptor('invoice-date')
researcher = EntityDescriptor('researcher', Researcher)
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
files = EntityListDescriptor(nsmap('file:file'), File)
externalids = ExternalidListDescriptor()
# permissions XXX
class Sample(Entity):
"Customer's sample to be analyzed; associated with a project."
_URI = 'samples'
_TAG = 'sample'
_PREFIX = 'smp'
name = StringDescriptor('name')
date_received = StringDescriptor('date-received')
date_completed = StringDescriptor('date-completed')
project = EntityDescriptor('project', Project)
submitter = EntityDescriptor('submitter', Researcher)
# artifact: defined below
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
notes = EntityListDescriptor('note', Note)
files = EntityListDescriptor(nsmap('file:file'), File)
externalids = ExternalidListDescriptor()
# biosource XXX
@classmethod
def create(cls, lims, container, position, udfs=None, **kwargs):
"""Create an instance of Sample from attributes then post it to the LIMS"""
if udfs is None:
udfs = {}
if not isinstance(container, Container):
raise TypeError('%s is not of type Container'%container)
instance = super(Sample, cls)._create(lims, creation_tag='samplecreation',udfs=udfs, **kwargs)
location = ElementTree.SubElement(instance.root, 'location')
ElementTree.SubElement(location, 'container', dict(uri=container.uri))
position_element = ElementTree.SubElement(location, 'value')
position_element.text = position
data = lims.tostring(ElementTree.ElementTree(instance.root))
instance.root = lims.post(uri=lims.get_uri(cls._URI), data=data)
instance._uri = instance.root.attrib['uri']
return instance
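# Illustrative sketch (an assumption about typical usage, not part of the original
# module): Sample.create() needs an existing Container and a well position; name and
# project are passed through as ordinary attributes. The lims and project objects are
# assumed to exist already; the container id, well and sample name are hypothetical.
def _example_create_sample(lims, project):
    container = Container(lims, id='27-1234')  # hypothetical container LIMS id
    return Sample.create(
        lims,
        container=container,
        position='A:1',
        name='example-sample',
        project=project,
    )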
class Containertype(Entity):
"Type of container for analyte artifacts."
_TAG = 'container-type'
_URI = 'containertypes'
_PREFIX = 'ctp'
name = StringAttributeDescriptor('name')
calibrant_wells = StringListDescriptor('calibrant-well')
unavailable_wells = StringListDescriptor('unavailable-well')
x_dimension = DimensionDescriptor('x-dimension')
y_dimension = DimensionDescriptor('y-dimension')
class Container(Entity):
"Container for analyte artifacts."
_URI = 'containers'
_TAG = 'container'
_PREFIX = 'con'
name = StringDescriptor('name')
type = EntityDescriptor('type', Containertype)
occupied_wells = IntegerDescriptor('occupied-wells')
placements = PlacementDictionaryDescriptor('placement')
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
state = StringDescriptor('state')
def get_placements(self):
"""Get the dictionary of locations and artifacts
using the more efficient batch call."""
result = self.placements.copy()
self.lims.get_batch(list(result.values()))
return result
def delete(self):
self.lims.delete(self.uri)
class Udfconfig(Entity):
"Instance of field type (cnf namespace)."
_URI = 'configuration/udfs'
name = StringDescriptor('name')
attach_to_name = StringDescriptor('attach-to-name')
attach_to_category = StringDescriptor('attach-to-category')
show_in_lablink = BooleanDescriptor('show-in-lablink')
allow_non_preset_values = BooleanDescriptor('allow-non-preset-values')
first_preset_is_default_value = BooleanDescriptor('first-preset-is-default-value')
show_in_tables = BooleanDescriptor('show-in-tables')
is_editable = BooleanDescriptor('is-editable')
is_required = BooleanDescriptor('is-required')
is_deviation = BooleanDescriptor('is-deviation')
is_controlled_vocabulary = BooleanDescriptor('is-controlled-vocabulary')
presets = StringListDescriptor('preset')
class Processtype(Entity):
_TAG = 'process-type'
_URI = 'processtypes'
_PREFIX = 'ptp'
def __init__(self, lims, uri=None, id=None, _create_new=False):
super(Processtype, self).__init__(lims, uri, id, _create_new)
self.parameters = ProcessTypeParametersDescriptor(self)
name = StringAttributeDescriptor('name')
field_definition = EntityListDescriptor('field-definition', Udfconfig)
process_inputs = ProcessTypeProcessInputDescriptor()
process_outputs = ProcessTypeProcessOutputDescriptor()
process_type_attribute = NamedStringDescriptor('process-type-attribute')
@property
def process_input(self):
return self.process_inputs[0]
class ControlType(Entity):
_URI = "controltypes"
_TAG = "control-type"
_PREFIX = 'ctrltp'
name = StringAttributeDescriptor('name')
supplier = StringDescriptor('supplier')
archived = BooleanDescriptor('archived')
single_step = BooleanDescriptor('single_step')
class Process(Entity):
"Process (instance of Processtype) executed producing ouputs from inputs."
_URI = 'processes'
_PREFIX = 'prc'
type = EntityDescriptor('type', Processtype)
date_run = StringDescriptor('date-run')
technician = EntityDescriptor('technician', Researcher)
protocol_name = StringDescriptor('protocol-name')
input_output_maps = InputOutputMapList()
udf = UdfDictionaryDescriptor()
udt = UdtDictionaryDescriptor()
files = EntityListDescriptor(nsmap('file:file'), File)
process_parameter = StringDescriptor('process-parameter')
instrument = EntityDescriptor('instrument', Instrument)
# process_parameters XXX
def outputs_per_input(self, inart, ResultFile=False, SharedResultFile=False, Analyte=False):
"""Getting all the output artifacts related to a particual input artifact"""
inouts = [io for io in self.input_output_maps if io[0]['limsid'] == inart]
if ResultFile:
inouts = [io for io in inouts if io[1]['output-type'] == 'ResultFile']
elif SharedResultFile:
inouts = [io for io in inouts if io[1]['output-type'] == 'SharedResultFile']
elif Analyte:
inouts = [io for io in inouts if io[1]['output-type'] == 'Analyte']
outs = [io[1]['uri'] for io in inouts]
return outs
def input_per_sample(self, sample):
"""gettiung all the input artifacts dereved from the specifyed sample"""
ins_all = self.all_inputs()
ins = []
for inp in ins_all:
for samp in inp.samples:
if samp.name == sample and inp not in ins:
ins.append(inp)
return ins
def all_inputs(self, unique=True, resolve=False):
"""Retrieving all input artifacts from input_output_maps
if unique is true, no duplicates are returned.
"""
# if the process has no input, that is not standard and we want to know about it
try:
ids = [io[0]['limsid'] for io in self.input_output_maps]
except TypeError:
logger.error("Process ", self, " has no input artifacts")
raise TypeError
if unique:
ids = list(frozenset(ids))
if resolve:
return self.lims.get_batch([Artifact(self.lims, id=id) for id in ids if id is not None])
else:
return [Artifact(self.lims, id=id) for id in ids if id is not None]
def all_outputs(self, unique=True, resolve=False):
"""Retrieving all output artifacts from input_output_maps
if unique is true, no duplicates are returned.
"""
        # Given how ids is structured, io[1] might be None: some processes don't have an output.
ids = [io[1]['limsid'] for io in self.input_output_maps if io[1] is not None]
if unique:
ids = list(frozenset(ids))
if resolve:
return self.lims.get_batch([Artifact(self.lims, id=id) for id in ids if id is not None])
else:
return [Artifact(self.lims, id=id) for id in ids if id is not None]
def shared_result_files(self):
"""Retreve all resultfiles of output-generation-type PerAllInputs."""
artifacts = self.all_outputs(unique=True)
return [a for a in artifacts if a.output_type == 'SharedResultFile']
def result_files(self):
"""Retreve all resultfiles of output-generation-type perInput."""
artifacts = self.all_outputs(unique=True)
return [a for a in artifacts if a.output_type == 'ResultFile']
def analytes(self):
"""Retreving the output Analytes of the process, if existing.
If the process is not producing any output analytes, the input
analytes are returned. Input/Output is returned as a information string.
Makes aggregate processes and normal processes look the same."""
info = 'Output'
artifacts = self.all_outputs(unique=True)
analytes = [a for a in artifacts if a.type == 'Analyte']
if len(analytes) == 0:
artifacts = self.all_inputs(unique=True)
analytes = [a for a in artifacts if a.type == 'Analyte']
info = 'Input'
return analytes, info
def parent_processes(self):
"""Retrieving all parent processes through the input artifacts"""
return [i_a.parent_process for i_a in self.all_inputs(unique=True)]
def output_containers(self):
"""Retrieve all unique output containers"""
cs = []
for o_a in self.all_outputs(unique=True):
if o_a.container:
cs.append(o_a.container)
return list(frozenset(cs))
@property
def step(self):
"""Retrive the Step coresponding to this process. They share the same id"""
return Step(self.lims, id=self.id)
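# Illustrative sketch (not part of the original module): chaining the navigation
# helpers of Process to collect per-input result files for one sample. The sample
# name is hypothetical and the snippet assumes Entity exposes an .id property for
# artifacts, as in genologics.
def _example_process_navigation(process):
    inputs = process.input_per_sample('example-sample')
    per_input_files = [process.outputs_per_input(art.id, ResultFile=True)
                       for art in inputs]
    containers = process.output_containers()
    return per_input_files, containers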
class Artifact(Entity):
"Any process input or output; analyte or file."
_URI = 'artifacts'
_TAG = 'artifact'
_PREFIX = 'art'
name = StringDescriptor('name')
type = StringDescriptor('type')
output_type = StringDescriptor('output-type')
parent_process = EntityDescriptor('parent-process', Process)
volume = StringDescriptor('volume')
concentration = StringDescriptor('concentration')
qc_flag = StringDescriptor('qc-flag')
location = LocationDescriptor('location')
working_flag = BooleanDescriptor('working-flag')
samples = EntityListDescriptor('sample', Sample)
udf = UdfDictionaryDescriptor()
files = EntityListDescriptor(nsmap('file:file'), File)
reagent_labels = | |
html and xhtml are found above
_type = Type.INLINEXBRL
else:
for pluginMethod in pluginClassMethods("ModelDocument.IdentifyType"):
_identifiedType = pluginMethod(modelXbrl, rootNode, filepath)
if _identifiedType is not None:
_type, _class, rootNode = _identifiedType
break
if _type is None:
_type = Type.UnknownXML
nestedInline = None
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}html"):
nestedInline = htmlElt
break
if nestedInline is None:
for htmlElt in rootNode.iter(tag="{http://www.w3.org/1999/xhtml}xhtml"):
nestedInline = htmlElt
break
if nestedInline is not None:
if (# not a valid test: XbrlConst.ixbrl in nestedInline.nsmap.values() or
any(e is not None for e in rootNode.iter(*XbrlConst.ixbrlTags))):
_type = Type.INLINEXBRL
rootNode = nestedInline
modelDocument = _class(modelXbrl, _type, normalizedUri, filepath, xmlDocument)
rootNode.init(modelDocument)
modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
modelDocument.parserLookupName = _parserLookupName
modelDocument.parserLookupClass = _parserLookupClass
modelDocument.xmlRootElement = modelDocument.targetXbrlRootElement = rootNode
modelDocument.schemaLocationElements.add(rootNode)
modelDocument.documentEncoding = _encoding
if isEntry or isDiscovered:
modelDocument.inDTS = True
# discovery (parsing)
if any(pluginMethod(modelDocument)
for pluginMethod in pluginClassMethods("ModelDocument.Discover")):
pass # discovery was performed by plug-in, we're done
elif _type == Type.SCHEMA:
modelDocument.schemaDiscover(rootNode, isIncluded, isSupplemental, namespace)
elif _type == Type.LINKBASE:
modelDocument.linkbaseDiscover(rootNode)
elif _type == Type.INSTANCE:
modelDocument.instanceDiscover(rootNode)
elif _type == Type.INLINEXBRL:
modelDocument.inlineXbrlDiscover(rootNode)
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.TESTCASESINDEX:
modelDocument.testcasesIndexDiscover(xmlDocument)
elif _type == Type.TESTCASE:
modelDocument.testcaseDiscover(rootNode)
elif _type == Type.REGISTRY:
modelDocument.registryDiscover(rootNode)
elif _type == Type.XPATHTESTSUITE:
modelDocument.xPathTestSuiteDiscover(rootNode)
elif _type == Type.VERSIONINGREPORT:
modelDocument.versioningReportDiscover(rootNode)
elif _type == Type.RSSFEED:
modelDocument.rssFeedDiscover(rootNode)
if isEntry or _type == Type.INLINEXBRL: # inline doc set members may not be entry but may have processing instructions
for pi in modelDocument.processingInstructions:
if pi.target == "arelle-unit-test":
modelXbrl.arelleUnitTests[pi.get("location")] = pi.get("action")
if isEntry:
while modelXbrl.schemaDocsToValidate:
doc = modelXbrl.schemaDocsToValidate.pop()
XmlValidateSchema.validate(doc, doc.xmlRootElement, doc.targetNamespace) # validate schema elements
if hasattr(modelXbrl, "ixdsHtmlElements"):
inlineIxdsDiscover(modelXbrl, modelDocument) # compile cross-document IXDS references
if isEntry or isSupplemental:
# re-order base set keys for entry point or supplemental linkbase addition
modelXbrl.baseSets = OrderedDefaultDict( # order by linkRole, arcRole of key
modelXbrl.baseSets.default_factory,
sorted(modelXbrl.baseSets.items(), key=lambda i: (i[0][0] or "",i[0][1] or "")))
return modelDocument
def loadSchemalocatedSchema(modelXbrl, element, relativeUrl, namespace, baseUrl):
if namespace == XbrlConst.xhtml: # block loading xhtml as a schema (e.g., inline which is xsd validated instead)
return None
#importSchemaLocation = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(relativeUrl, baseUrl)
#doc = load(modelXbrl, relativeUrl, isIncluded=False, isDiscovered=False, namespace=namespace, referringElement=element, referringElementUrl=baseUrl)
doc = load(modelXbrl, relativeUrl, isIncluded=False, isDiscovered=False, namespace=namespace, referringElement=element, base=baseUrl)
if doc:
if doc.targetNamespace != namespace:
modelXbrl.error("xmlSchema1.4.2.3:refSchemaNamespace",
_("SchemaLocation of %(fileName)s expected namespace %(namespace)s found targetNamespace %(targetNamespace)s"),
modelObject=element, fileName=baseUrl,
namespace=namespace, targetNamespace=doc.targetNamespace)
else:
doc.inDTS = False
return doc
def create(modelXbrl, type, uri, schemaRefs=None, isEntry=False, initialXml=None, initialComment=None, base=None, discover=True, documentEncoding="utf-8"):
"""Returns a new modelDocument, created from scratch, with any necessary header elements
(such as the schema, instance, or RSS feed top level elements)
:param type: type of model document (value of ModelDocument.Types, an integer)
:type type: Types
:param schemaRefs: list of URLs when creating an empty INSTANCE, to use to discover (load) the needed DTS modelDocument objects.
:type schemaRefs: [str]
    :param isEntry: True when creating an entry (e.g., instance)
    :type isEntry: bool
    :param initialXml: initial xml content for xml documents
    :type initialXml: str
"""
normalizedUri = modelXbrl.modelManager.cntlr.webCache.normalizeUrl(uri, base)
if isEntry:
modelXbrl.uri = normalizedUri
modelXbrl.entryLoadingUrl = normalizedUri
modelXbrl.uriDir = os.path.dirname(normalizedUri)
for i in range(modelXbrl.modelManager.disclosureSystem.maxSubmissionSubdirectoryEntryNesting):
modelXbrl.uriDir = os.path.dirname(modelXbrl.uriDir)
filepath = modelXbrl.modelManager.cntlr.webCache.getfilename(normalizedUri, filenameOnly=True)
if initialComment:
initialComment = "<!--" + initialComment + "-->"
# XML document has nsmap root element to replace nsmap as new xmlns entries are required
if initialXml and type in (Type.INSTANCE, Type.SCHEMA, Type.LINKBASE, Type.RSSFEED):
Xml = '<nsmap>{}{}</nsmap>'.format(initialComment or '', initialXml or '')
elif type == Type.INSTANCE:
# modelXbrl.uriDir = os.path.dirname(normalizedUri)
Xml = ('<nsmap>{}'
'<xbrl xmlns="http://www.xbrl.org/2003/instance"'
' xmlns:link="http://www.xbrl.org/2003/linkbase"'
' xmlns:xlink="http://www.w3.org/1999/xlink">').format(initialComment)
if schemaRefs:
for schemaRef in schemaRefs:
Xml += '<link:schemaRef xlink:type="simple" xlink:href="{0}"/>'.format(schemaRef.replace("\\","/"))
Xml += '</xbrl></nsmap>'
elif type == Type.SCHEMA:
Xml = ('<nsmap>{}<schema xmlns="http://www.w3.org/2001/XMLSchema" /></nsmap>').format(initialComment)
elif type == Type.RSSFEED:
Xml = '<nsmap><rss version="2.0" /></nsmap>'
elif type in (Type.DTSENTRIES, Type.HTML):
Xml = None
else:
type = Type.UnknownXML
Xml = '<nsmap>{0}</nsmap>'.format(initialXml or '')
if Xml:
import io
file = io.StringIO(Xml)
_parser, _parserLookupName, _parserLookupClass = parser(modelXbrl,normalizedUri)
xmlDocument = etree.parse(file,parser=_parser,base_url=filepath)
file.close()
else:
xmlDocument = None
if type == Type.RSSFEED:
from arelle.ModelRssObject import ModelRssObject
modelDocument = ModelRssObject(modelXbrl, type, uri, filepath, xmlDocument)
else:
modelDocument = ModelDocument(modelXbrl, type, normalizedUri, filepath, xmlDocument)
if Xml:
modelDocument.parser = _parser # needed for XmlUtil addChild's makeelement
modelDocument.parserLookupName = _parserLookupName
modelDocument.parserLookupClass = _parserLookupClass
modelDocument.documentEncoding = documentEncoding
rootNode = xmlDocument.getroot()
rootNode.init(modelDocument)
if xmlDocument:
for semanticRoot in rootNode.iterchildren():
if isinstance(semanticRoot, ModelObject):
modelDocument.xmlRootElement = modelDocument.targetXbrlRootElement = semanticRoot
break
# init subtree
for elt in xmlDocument.iter():
if isinstance(elt, ModelObject):
elt.init(modelDocument)
else:
xmlDocument = None
if type == Type.INSTANCE and discover:
modelDocument.instanceDiscover(modelDocument.xmlRootElement)
elif type == Type.RSSFEED and discover:
modelDocument.rssFeedDiscover(modelDocument.xmlRootElement)
elif type == Type.SCHEMA:
modelDocument.targetNamespace = None
modelDocument.isQualifiedElementFormDefault = False
modelDocument.isQualifiedAttributeFormDefault = False
modelDocument.definesUTR = False
return modelDocument
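# Illustrative sketch (not part of the original module): create() can bootstrap an
# empty instance document whose DTS is then discovered from the given schemaRefs.
# The file name and taxonomy entry point URL below are hypothetical.
def _exampleCreateInstance(modelXbrl):
    return create(modelXbrl, Type.INSTANCE, "example-instance.xbrl",
                  schemaRefs=["http://example.com/taxonomy/entry.xsd"],
                  isEntry=True)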
class Type:
"""
.. class:: Type
Static class of Enumerated type representing modelDocument type
"""
UnknownXML=0
UnknownNonXML=1
UnknownTypes=1 # to test if any unknown type, use <= Type.UnknownTypes
firstXBRLtype=2 # first filetype that is XBRL and can hold a linkbase, etc inside it
SCHEMA=2
LINKBASE=3
INSTANCE=4
INLINEXBRL=5
    lastXBRLtype=5 # last filetype that is XBRL and can hold a linkbase, etc inside it
DTSENTRIES=6 # multiple schema/linkbase Refs composing a DTS but not from an instance document
INLINEXBRLDOCUMENTSET=7
VERSIONINGREPORT=8
TESTCASESINDEX=9
TESTCASE=10
REGISTRY=11
REGISTRYTESTCASE=12
XPATHTESTSUITE=13
RSSFEED=14
ARCSINFOSET=15
FACTDIMSINFOSET=16
HTML=17
TESTCASETYPES = (TESTCASESINDEX, TESTCASE, REGISTRY, REGISTRYTESTCASE, XPATHTESTSUITE)
typeName = ("unknown XML",
"unknown non-XML",
"schema",
"linkbase",
"instance",
"inline XBRL instance",
"entry point set",
"inline XBRL document set",
"versioning report",
"testcases index",
"testcase",
"registry",
"registry testcase",
"xpath test suite",
"RSS feed",
"arcs infoset",
"fact dimensions infoset",
"html non-XBRL")
def identify(filesource, filepath):
_type = Type.UnknownNonXML
file, = filesource.file(filepath, stripDeclaration=True, binary=True)
try:
_rootElt = True
for _event, elt in etree.iterparse(file, events=("start",), recover=True, huge_tree=True):
if _rootElt:
_rootElt = False
_type = {"{http://www.xbrl.org/2003/instance}xbrl": Type.INSTANCE,
"{http://www.xbrl.org/2003/linkbase}linkbase": Type.LINKBASE,
"{http://www.w3.org/2001/XMLSchema}schema": Type.SCHEMA}.get(elt.tag, Type.UnknownXML)
if _type == Type.UnknownXML and elt.tag.endswith("html"):
pass # following is not a valid test:
# if XbrlConst.ixbrl in elt.nsmap.values():
# _type = Type.INLINEXBRL
# break # stop parsing
# else fall through to element scan for ix11 element
else:
break # stop parsing
if XbrlConst.ixbrlTagPattern.match(elt.tag):
_type = Type.INLINEXBRL
break
except Exception as err:
if not _rootElt: # if _rootElt is false then a root element was found and it's some kind of xml
_type = Type.UnknownXML
if filesource.cntlr:
filesource.cntlr.addToLog("%(error)s",
messageCode="arelle:fileIdentificationError",
messageArgs={"error":err}, file=filepath)
file.close()
return _type
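# Illustrative sketch (not part of the original module): identify() inspects only the
# root element (and any ix* descendants) to classify a file before it is fully loaded.
# The filesource is assumed to be an already-opened arelle FileSource; the path is a
# hypothetical placeholder.
def _exampleIdentifyInline(filesource):
    return identify(filesource, "reports/example.xhtml") == Type.INLINEXBRL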
# schema elements which end the include/import scan
schemaBottom = {"element", "attribute", "notation", "simpleType", "complexType", "group", "attributeGroup"}
fractionParts = {"{http://www.xbrl.org/2003/instance}numerator",
"{http://www.xbrl.org/2003/instance}denominator"}
class ModelDocument:
"""
    .. class:: ModelDocument(modelXbrl, type, uri, filepath, xmlDocument)
The modelDocument performs discovery and initialization when loading documents.
For instances, schema and linkbase references are resolved, as well as non-DTS schema locations needed
to ensure PSVI-validated XML elements in the instance document (for formula processing).
For DTSes, schema includes and imports are resolved, linkbase references discovered, and
concepts made accessible by qname by the modelXbrl and ID at the modelDocument scope.
Testcase documents (and their indexing files) are loaded as modelDocument objects.
Specialized modelDocuments are the versioning report, which must discover from and to DTSes,
and an RSS feed, which has a unique XML structure.
:param modelXbrl: The ModelXbrl (DTS) object owning this modelDocument.
:type modelXbrl: ModelXbrl
:param uri: The document's source entry URI (such as web site URL)
:type uri: str
:param filepath: The file path of the source for the document (local file or web cache file name)
:type filepath: str
:param xmlDocument: lxml parsed xml document tree model of lxml proxy objects
:type xmlDocument: lxml document
.. attribute:: modelDocument
Self (provided for consistency with modelObjects)
.. attribute:: modelXbrl
The owning modelXbrl
.. attribute:: type
The enumerated document type
.. attribute:: uri
Uri as discovered
.. attribute:: filepath
File path as loaded (e.g., from web cache on local drive)
.. attribute:: basename
Python basename (last segment of file path)
.. attribute:: xmlDocument
The lxml tree model of xml proxies
.. attribute:: targetNamespace
Target namespace (if a schema)
.. attribute:: objectIndex
Position in lxml objects | |
'''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.util import constants, exec_utils, xes_constants
from pm4py.streaming.util.dictio import generator
import logging
from pm4py.objects.petri_net.obj import PetriNet, Marking
from pm4py.streaming.algo.interface import StreamingAlgorithm
from pm4py.objects.petri_net import semantics
from copy import copy
import sys
class Parameters:
DICT_VARIANT = "dict_variant"
DICT_ID = "dict_id"
CASE_DICT_ID = "case_dict_id"
MISSING_DICT_ID = "missing_dict_id"
REMAINING_DICT_ID = "remaining_dict_id"
CASE_ID_KEY = constants.PARAMETER_CONSTANT_CASEID_KEY
ACTIVITY_KEY = constants.PARAMETER_CONSTANT_ACTIVITY_KEY
MAXIMUM_ITERATIONS_INVISIBLES = "maximum_iterations_invisibles"
class TbrStreamingConformance(StreamingAlgorithm):
def __init__(self, net, im, fm, parameters=None):
"""
Initialize the token-based replay streaming conformance
Parameters
--------------
net
Petri net
im
Initial marking
fm
Final marking
"""
if parameters is None:
parameters = {}
self.case_id_key = exec_utils.get_param_value(Parameters.CASE_ID_KEY, parameters, constants.CASE_CONCEPT_NAME)
self.activity_key = exec_utils.get_param_value(Parameters.ACTIVITY_KEY, parameters,
xes_constants.DEFAULT_NAME_KEY)
self.maximum_iterations_invisibles = exec_utils.get_param_value(Parameters.MAXIMUM_ITERATIONS_INVISIBLES,
parameters, 10)
self.net = net
self.im = im
self.fm = fm
self.places_inv_dict = {x.name: x for x in net.places}
self.activities = list(set(x.label for x in self.net.transitions))
self.dictio_spaths = self.get_paths_net()
self.build_dictionaries(parameters=parameters)
StreamingAlgorithm.__init__(self)
def build_dictionaries(self, parameters):
"""
Builds the dictionaries needed to store the information during the replay
Parameters
---------------
parameters
Parameters:
- Parameters.DICT_VARIANT: type of dictionary to use
- Parameters.CASE_DICT_ID: identifier of the dictionary hosting the markings (0)
- Parameters.MISSING_DICT_ID: identifier of the dictionary hosting the missing tokens (1)
- Parameters.REMAINING_DICT_ID: identifier of the dictionary hosting the remaining tokens (2)
"""
dict_variant = exec_utils.get_param_value(Parameters.DICT_VARIANT, parameters, generator.Variants.THREAD_SAFE)
case_dict_id = exec_utils.get_param_value(Parameters.CASE_DICT_ID, parameters, 0)
missing_dict_id = exec_utils.get_param_value(Parameters.MISSING_DICT_ID, parameters, 1)
remaining_dict_id = exec_utils.get_param_value(Parameters.REMAINING_DICT_ID, parameters, 2)
parameters_case_dict = copy(parameters)
parameters_case_dict[Parameters.DICT_ID] = case_dict_id
parameters_missing = copy(parameters)
parameters_case_dict[Parameters.DICT_ID] = missing_dict_id
parameters_remaining = copy(parameters)
parameters_remaining[Parameters.DICT_ID] = remaining_dict_id
self.case_dict = generator.apply(variant=dict_variant, parameters=parameters_case_dict)
self.missing = generator.apply(variant=dict_variant, parameters=parameters_missing)
self.remaining = generator.apply(variant=dict_variant, parameters=parameters_remaining)
def get_paths_net(self):
"""
        Gets the dictionary of shortest paths using invisible transitions
Returns
---------------
dictio_spaths
Dictionary of shortest paths
"""
import networkx as nx
G = nx.DiGraph()
for pl in self.net.places:
G.add_node(pl)
for tr in self.net.transitions:
G.add_node(tr)
if tr.label is None:
for a in tr.out_arcs:
target_place = a.target
G.add_edge(tr, target_place)
for a in tr.in_arcs:
source_place = a.source
G.add_edge(source_place, tr)
shortest_path = nx.all_pairs_shortest_path(G)
dictio_spaths = {}
for el in shortest_path:
if type(el[0]) is PetriNet.Place:
for sel in el[1]:
spath = [x for x in el[1][sel][1:-2] if type(x) is PetriNet.Transition]
if spath:
if not el[0] in dictio_spaths:
dictio_spaths[el[0]] = {}
dictio_spaths[el[0]][sel] = spath
return dictio_spaths
def _process(self, event):
"""
Checks the event according to the TBR
Parameters
---------------
event
Event (dictionary)
Returns
---------------
boolean
Boolean value
"""
case = event[self.case_id_key] if self.case_id_key in event else None
activity = event[self.activity_key] if self.activity_key in event else None
if case is not None and activity is not None:
self.verify_tbr(self.encode_str(case), activity)
else:
self.message_case_or_activity_not_in_event(event)
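    # Illustrative sketch (an assumption, not part of the original class): the streaming
    # framework normally delivers plain event dictionaries to _process. The keys below
    # are the pm4py defaults; the case id and activity are hypothetical and the activity
    # is assumed to be a transition label of the net.
    def _example_process_event(self):
        event = {"case:concept:name": "case-1", "concept:name": "register request"}
        self._process(event)
        return self.get_status("case-1")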
def encode_str(self, stru):
"""
Encodes a string for storage in generic dictionaries
"""
return str(stru)
def encode_marking(self, mark):
"""
Encodes a marking for storage in generic dictionaries
"""
em = {}
for pl in mark:
em[pl.name] = mark[pl]
return str(em)
def decode_marking(self, ems):
"""
Decodes a marking from a generic dictionary
to a Marking object
"""
em = eval(ems)
mark = Marking()
for p in em:
mark[self.places_inv_dict[p]] = em[p]
return mark
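    # Illustrative note (assuming a net with places named "p1" and "p2"):
    # encode_marking/decode_marking round-trip a Marking through its string form, e.g.
    #   Marking({p1: 1, p2: 2}) -> "{'p1': 1, 'p2': 2}" -> Marking({p1: 1, p2: 2}),
    # so markings can be kept in the generic string-keyed dictionaries created above.
    # decode_marking relies on eval(), so these dictionaries must only ever contain
    # strings produced by encode_marking.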
def verify_tbr(self, case, activity):
"""
Verifies an activity happening in a case
Parameters
--------------
case
Case
activity
Activity
"""
if activity in self.activities:
if case not in self.case_dict:
self.case_dict[case] = self.encode_marking(copy(self.im))
self.missing[case] = 0
self.remaining[case] = 0
marking = self.decode_marking(self.case_dict[case])
new_marking = marking
prev_marking = None
correct_exec = False
numb_it = 0
while new_marking is not None and prev_marking != new_marking:
numb_it = numb_it + 1
if numb_it > self.maximum_iterations_invisibles:
break
enabled_transitions = semantics.enabled_transitions(self.net, new_marking)
matching_transitions = [x for x in enabled_transitions if x.label == activity]
if matching_transitions:
new_marking = semantics.weak_execute(matching_transitions[0], new_marking)
self.case_dict[case] = self.encode_marking(new_marking)
correct_exec = True
break
prev_marking = new_marking
new_marking = self.enable_trans_with_invisibles(new_marking, activity)
correct_exec = False
if correct_exec is False:
self.message_missing_tokens(activity, case)
# enables one of the matching transitions
matching_transitions = [x for x in self.net.transitions if x.label == activity]
t = matching_transitions[0]
for a in t.in_arcs:
pl = a.source
mark = a.weight
                    if pl not in marking or marking[pl] < mark:
self.missing[case] = int(self.missing[case]) + (mark - marking[pl])
marking[pl] = mark
new_marking = semantics.weak_execute(t, marking)
self.case_dict[case] = self.encode_marking(new_marking)
else:
self.message_activity_not_possible(activity, case)
def enable_trans_with_invisibles(self, marking, activity):
"""
Enables a visible transition (that is not enabled) through
invisible transitions
Parameters
----------------
marking
Marking
activity
Activity to enable
Returns
---------------
new_marking
New marking (where the transition CAN be enabled)
"""
corr_trans_to_act = [x for x in self.net.transitions if x.label == activity]
spath = None
spath_length = sys.maxsize
for pl in marking:
for tr in corr_trans_to_act:
if pl in self.dictio_spaths:
if tr in self.dictio_spaths[pl]:
new_path = self.dictio_spaths[pl][tr]
if len(new_path) < spath_length:
spath = new_path
spath_length = len(spath)
if spath is not None:
# try to fire the transitions
for tr in spath:
if tr in semantics.enabled_transitions(self.net, marking):
marking = semantics.weak_execute(tr, marking)
else:
return None
return marking
return None
def get_status(self, case):
"""
Gets the status of an open case
Parameters
----------------
case
Case
"""
if case in self.case_dict:
return {"marking": self.decode_marking(self.case_dict[case]), "missing": int(self.missing[case])}
else:
self.message_case_not_in_dictionary(case)
def terminate(self, case):
"""
Terminate a case, checking if the final marking is reached
Parameters
----------------
case
Case ID
Returns
---------------
dictio
Dictionary containing: the marking, the count of missing and remaining tokens
"""
case = self.encode_str(case)
if case in self.case_dict:
remaining = 0
if not self.decode_marking(self.case_dict[case]) == self.fm:
new_marking = self.reach_fm_with_invisibles(self.case_dict[case])
if new_marking is None:
new_marking = self.decode_marking(self.case_dict[case])
if not new_marking == self.fm:
self.message_final_marking_not_reached(case, new_marking)
fm_copy = copy(self.fm)
for m in fm_copy:
if not m in new_marking:
new_marking[m] = 0
self.missing[case] = int(self.missing[case]) + (fm_copy[m] - new_marking[m])
for m in new_marking:
if not m in fm_copy:
fm_copy[m] = 0
remaining += new_marking[m] - fm_copy[m]
missing = int(self.missing[case])
is_fit = missing == 0 and remaining == 0
ret = {"marking": self.decode_marking(self.case_dict[case]), "missing": missing, "remaining": remaining, "is_fit": is_fit}
del self.case_dict[case]
del self.missing[case]
del self.remaining[case]
return ret
else:
self.message_case_not_in_dictionary(case)
def terminate_all(self):
"""
Terminate all open cases
"""
cases = list(self.case_dict.keys())
for case in cases:
self.terminate(case)
def reach_fm_with_invisibles(self, marking):
"""
Reaches the final marking using invisible transitions
Parameters
--------------
marking
Marking
Returns
--------------
new_marking
            New marking (hopefully equal to the final marking)
"""
spath = None
spath_length = sys.maxsize
for pl in marking:
if pl in self.dictio_spaths:
for pl2 in self.fm:
if pl2 in self.dictio_spaths[pl]:
new_path = self.dictio_spaths[pl][pl2]
if len(new_path) < spath_length:
spath = new_path
spath_length = len(spath)
if spath is not None:
# try to fire the transitions
for tr in spath:
if tr in semantics.enabled_transitions(self.net, marking):
marking = semantics.weak_execute(tr, marking)
else:
return None
return marking
return None
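    # Illustrative note: reach_fm_with_invisibles uses the precomputed place -> node
    # shortest paths to pick the shortest chain of invisible transitions leading from a
    # currently marked place towards a place of the final marking, e.g.
    #   [p_marked] --tau1--> [p_mid] --tau2--> [p_final]
    # would weakly fire tau1 and then tau2, provided each is enabled when reached;
    # otherwise it gives up and returns None.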
def message_case_or_activity_not_in_event(self, event):
"""
        Sends a message if the case or the activity is not
        present in the event
"""
logging.error("case or activities are none! " + str(event))
def message_activity_not_possible(self, activity, case):
"""
Sends a message if the activity is not possible
according to the model
Parameters
---------------
activity
Activity
case
Case
"""
logging.error("the activity " + str(activity) + " is not possible according to the model! case: " + str(case))
def message_missing_tokens(self, activity, case):
"""
Sends a message if the insertion of missing
        tokens occurs
Parameters
---------------
activity
Activity
case
Case
"""
logging.error(
"the activity " + str(activity) + " could not be executed without inserting missing tokens! case: " + str(
case))
def message_case_not_in_dictionary(self, case):
"""
Sends a message if the provided case is not in the dictionary
Parameters
---------------
case
Case
"""
logging.error("the case " + str(case) + " is not in the dictionary! case: " | |
<reponame>OHMC/productos-satelitales<filename>src/plotters_lib/plot.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import os
import logging
from pathlib import Path
import datetime
import time as t
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.patches import Rectangle
from mpl_toolkits.basemap import Basemap
from netCDF4 import Dataset
from osgeo import osr, gdal
from PIL import Image
from plotters_lib.cpt_convert import load_cpt
from config.constants import LOGO, SHAPEFILES, EXTENT, EXTENT_WEBMET, NOAA_GOES16_PATH
from config.logging_conf import GOES_LOGGER_NAME
from wrf_api.goes_api_ingest import post_img_to_api
logger = logging.getLogger(GOES_LOGGER_NAME)
matplotlib.use("agg")
# Define KM_PER_DEGREE
KM_PER_DEGREE = 111.32
# GOES-16 Extent (satellite projection) [llx, lly, urx, ury]
GOES16_EXTENT = [-5434894.885056,
-5434894.885056,
5434894.885056,
5434894.885056]
RESOLUTION = 1.
def get_geo_t(extent, nlines, ncols):
# Compute resolution based on data dimension
resx = (extent[2] - extent[0]) / ncols
resy = (extent[3] - extent[1]) / nlines
return [extent[0], resx, 0, extent[3], 0, -resy]
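# Minimal sketch showing the geotransform convention returned by get_geo_t():
# [upper-left x, x resolution, 0, upper-left y, 0, -y resolution]. The extent used here
# is a made-up example, not one shipped with this module.
def _ejemplo_get_geo_t():
    extent = [-80.0, -45.0, -50.0, -20.0]  # [llx, lly, urx, ury] in degrees (hypothetical)
    gt = get_geo_t(extent, nlines=250, ncols=300)
    # gt == [-80.0, 0.1, 0, -20.0, 0, -0.1]: 0.1 deg/pixel in x and in y for this example
    return gt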
def get_metadatos(file):
nc = Dataset(file)
_metadatos = {'icanal': nc.variables['band_id'][:][0], 'cols': nc.variables['x'].shape[0],
'rows': nc.variables['y'].shape[0], 't_0': float(nc.variables['t'][0]),
't_start': nc.variables['t'].units[14:33], 'offset': nc.variables['CMI'].add_offset,
'scale': nc.variables['CMI'].scale_factor,
'proj': nc.variables['goes_imager_projection'].grid_mapping_name,
'lat_0': nc.variables['geospatial_lat_lon_extent'].getncattr('geospatial_lat_center'),
'lon_0': nc.variables['geospatial_lat_lon_extent'].getncattr('geospatial_lon_center'),
'h': nc.variables['goes_imager_projection'].getncattr('perspective_point_height'),
'a': nc.variables['goes_imager_projection'].getncattr('semi_major_axis'),
'b': nc.variables['goes_imager_projection'].getncattr('semi_minor_axis'),
'f': 1 / float(nc.variables['goes_imager_projection'].getncattr('inverse_flattening')),
'fecha_img': datetime.datetime.strptime(nc.time_coverage_start[:16], '%Y-%m-%dT%H:%M')}
nc.close()
return _metadatos
def print_shapes(
m: Basemap,
parallels=np.arange(-90, 90, 5),
meridians=np.arange(0, 360, 5),
prov=True,
dep=True
):
"""
Dibuja las linas de los mapas.
Parametros
----------
m : basemap
basemap al cual se le van a dibujar los shapes
parallels : np.arrange, opcional
Arreglo con las coordenadas donde van paralelos
Por defecto: np.arange(-90,90,5).
meridians : np.arrange, opcional
Arreglo con las coordenadas donde van meridianos
Por defecto: np.arange(0,360,5).
prov : Bool, opcional
Si prov=True, dibuja los contornos de las provincias
Por defecto: True.
dep : Bool, opcional
Si dep=True, dibuja los contornos de los departamentos
Por defecto: True.
"""
m.drawparallels(parallels, labels=[1, 0, 0, 0], color='#FFFFFF', fontsize=0)
m.drawmeridians(meridians, labels=[0, 0, 0, 1], color='#FFFFFF', fontsize=0)
if prov:
m.readshapefile(
shapefile=SHAPEFILES + '/provincias',
name='prov',
drawbounds=True,
zorder=None,
linewidth=0.60,
color='#FFFFFF',
antialiased=1
)
if dep:
m.readshapefile(
shapefile=SHAPEFILES + '/departamentos',
name='dep',
drawbounds=True,
zorder=None,
linewidth=0.20,
color='#808080',
antialiased=1
)
def convertir_negro_transparente(ruta):
"""
    Converts the black pixels of an image to transparent.
"""
img = Image.open(ruta)
img = img.convert("RGBA")
pixdata = img.load()
width, height = img.size
for y in range(height):
for x in range(width):
if pixdata[x, y] == (0, 0, 0, 255):
pixdata[x, y] = (0, 0, 0, 0)
img.save(ruta, "PNG")
def generar_imagen(nombre: str, _metadatos: dict, path_imagenes: str, canal, extent=EXTENT):
start = t.time()
icanal = _metadatos['icanal']
    # Calibration parameters
offset = _metadatos['offset']
scale = _metadatos['scale']
    # Projection parameters
lat_0 = str(_metadatos['lat_0'])
lon_0 = str(_metadatos['lon_0'])
h = str(_metadatos['h'])
a = str(_metadatos['a'])
b = str(_metadatos['b'])
f = str(_metadatos['f'])
    # %% read and extract information from the pass
connection_info = 'HDF5:\"' + nombre + '\"://' + canal.variable
raw = gdal.Open(connection_info, gdal.GA_ReadOnly)
# driver = raw.GetDriver().LongName
band = raw.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType)
# print(bandtype)
    # %% Projections
# GOES-16 Spatial Reference System
source_prj = osr.SpatialReference()
proj_str = f"+proj=geos +h={h} +a={a} +b={b} +f={f} lat_0={lat_0} +lon_0={lon_0} +sweep=x +no_defs"
source_prj.ImportFromProj4(proj_str)
    # Lat/lon WGS84 Spatial Reference System
target_prj = osr.SpatialReference()
target_prj.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# Setup projection and geo-transformation
raw.SetProjection(source_prj.ExportToWkt())
raw.SetGeoTransform(get_geo_t(GOES16_EXTENT, raw.RasterYSize, raw.RasterXSize))
# Compute grid dimension
sizex = int(((extent[2] - extent[0]) * KM_PER_DEGREE) / RESOLUTION)
sizey = int(((extent[3] - extent[1]) * KM_PER_DEGREE) / RESOLUTION)
# Get memory driver
mem_driver = gdal.GetDriverByName('MEM')
# Create grid
grid = mem_driver.Create('grid', sizex, sizey, 1, gdal.GDT_Float32)
# Setup projection and geo-transformation
grid.SetProjection(target_prj.ExportToWkt())
grid.SetGeoTransform(get_geo_t(extent, grid.RasterYSize, grid.RasterXSize))
# Perform the projection/resampling
gdal.ReprojectImage(
raw,
grid,
source_prj.ExportToWkt(),
target_prj.ExportToWkt(),
gdal.GRA_NearestNeighbour,
options=['NUM_THREADS=ALL_CPUS']
)
# Read grid data
array1 = grid.ReadAsArray()
# Mask fill values (i.e. invalid values)
    array1 = np.ma.masked_where(array1 == -1, array1, copy=False)
    # %% Calibration
array = array1 * scale + offset
grid.GetRasterBand(1).SetNoDataValue(-1)
grid.GetRasterBand(1).WriteArray(array)
# %% Plot the Data ========================================
# Create the basemap reference for the Rectangular Projection
plt.clf()
plt.figure(figsize=(10, 9.5))
    # EPSG:4326 is WGS84 (lat/lon)
bmap = Basemap(resolution='h', llcrnrlon=extent[0], llcrnrlat=extent[1],
urcrnrlon=extent[2], urcrnrlat=extent[3], epsg=4326)
# Draw the shapefiles
print_shapes(bmap)
plt.subplots_adjust(left=0.02, right=0.98, top=1, bottom=0.02)
# Converts a CPT file to be used in Python
cpt = load_cpt(canal.cptfile)
# Makes a linear interpolation
cpt_convert = LinearSegmentedColormap('cpt', cpt)
# Plot the GOES-16 channel with the converted CPT colors
# (you may alter the min and max to match your preference)
if canal.visible:
bmap.imshow(
array,
origin='upper',
cmap='gray',
vmin=0.,
vmax=1.
)
else:
temp = array - 273.15
bmap.imshow(
temp,
origin='upper',
cmap=cpt_convert,
vmin=-90,
vmax=50
)
# Add a black rectangle in the bottom to insert the image description
# Max Lon - Min Lon
lon_difference = (extent[2] - extent[0])
current_axis = plt.gca()
current_axis.add_patch(Rectangle(
(extent[0], extent[1]),
lon_difference,
lon_difference * 0.020,
alpha=1,
zorder=3,
facecolor='black'
))
titulo_negro = " GOES-16 ABI Canal %02d %s UTC" % (icanal, _metadatos['fecha_img'].strftime('%Y-%m-%d %H:%M'))
institucion = "CONAE-Argentina"
# Add the image description inside the black rectangle
# Max lat - Min lat
lat_difference = (extent[3] - extent[1])
plt.text(extent[0], extent[1] + lat_difference * 0.005, titulo_negro,
horizontalalignment='left', color='white', size=7)
plt.text(extent[2], extent[1] + lat_difference * 0.005, institucion,
horizontalalignment='right', color='white', size=7)
# Insert the colorbar at the right
cb = bmap.colorbar(location='bottom', size='2%', pad='1%')
    # Keep the colorbar outline visible
    cb.outline.set_visible(True)
    # Hide the colorbar ticks (zero width)
    cb.ax.tick_params(width=0)
    # Place the colorbar labels right next to the bar
cb.ax.xaxis.set_tick_params(pad=0)
# Change the color and size of the colorbar labels
cb.ax.tick_params(axis='x', colors='black', labelsize=8)
cb.set_label(canal.unidad)
ax = plt.gca()
img = image.imread(LOGO)
plt.figimage(
img,
25,
100,
# ax.figure.bbox.xmax - 160,
# ax.figure.bbox.ymax - 70,
zorder=1
)
# ax.text(0,
# 1.10,
# canal.nombre,
# verticalalignment='top',
# transform=ax.transAxes,
# fontsize=20
# )
# ax.text(0,
# 1.03,
# metadatos['fecha_img'].strftime('%Y-%m-%d %H:%M') + ' UTC',
# verticalalignment='top',
# transform=ax.transAxes,
# fontsize=12
# )
# ax.text(1,
# 1.03,
# 'GOES-16 ABI Canal %02d' % icanal,
# horizontalalignment='right',
# verticalalignment='top',
# transform=ax.transAxes,
# fontsize=10
# )
    # save to PNG
fecha = _metadatos['fecha_img']
fecha_str = fecha.strftime('%Y-%m-%d_%H_%M')
path_fecha = fecha.strftime('%Y_%m/%d')
path_imagen = f"{path_imagenes}/C{_metadatos['icanal']}_ARG{fecha_str}_WGS84.png"
plt.savefig(path_imagen)
plt.clf()
plt.close()
print('- finished! Time:', t.time() - start, 'seconds')
img_api_path = f"GOES/{path_fecha}/{canal.codigo}/C{_metadatos['icanal']}_ARG{fecha_str}_WGS84.png"
post_img_to_api(img_api_path, fecha, producto=canal.codigo, campo_prod='short_name')
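# Hedged usage sketch (the file name and the `canal` object below are assumptions for
# illustration; `canal` is expected to expose the attributes used above, i.e. .variable,
# .cptfile, .visible, .unidad and .codigo):
#   nc_file = "OR_ABI-L2-CMIPF-M6C13_G16_example.nc"
#   metadatos = get_metadatos(nc_file)
#   generar_imagen(nc_file, metadatos, "/tmp/imagenes", canal)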
def generar_imagen_webmet(nombre, _metadatos, path_imagenes, canal, extent=EXTENT_WEBMET):
start = t.time()
icanal = _metadatos['icanal']
    # Calibration parameters
offset = _metadatos['offset']
scale = _metadatos['scale']
    # Projection parameters
lat_0 = str(_metadatos['lat_0'])
lon_0 = str(_metadatos['lon_0'])
h = str(_metadatos['h'])
a = str(_metadatos['a'])
b = str(_metadatos['b'])
f = str(_metadatos['f'])
    # %% read and extract information from the pass
connection_info = 'HDF5:\"' + nombre + '\"://' + canal.variable
raw = gdal.Open(connection_info, gdal.GA_ReadOnly)
# driver = raw.GetDriver().LongName
band = raw.GetRasterBand(1)
bandtype = gdal.GetDataTypeName(band.DataType)
print(bandtype)
    # %% Projections
# GOES-16 Spatial Reference System
source_prj = osr.SpatialReference()
proj_str = f"+proj=geos +h={h} +a={a} +b={b} +f={f} lat_0={lat_0} +lon_0={lon_0} +sweep=x +no_defs"
source_prj.ImportFromProj4(proj_str)
    # Lat/lon WGS84 Spatial Reference System https://epsg.io/4326
target_prj = osr.SpatialReference()
target_prj.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
# Setup projection and geo-transformation
raw.SetProjection(source_prj.ExportToWkt())
raw.SetGeoTransform(get_geo_t(GOES16_EXTENT,
raw.RasterYSize,
raw.RasterXSize))
# Compute grid dimension
sizex = int(((extent[2] - extent[0]) * KM_PER_DEGREE) / RESOLUTION)
sizey = int(((extent[3] - extent[1]) * KM_PER_DEGREE) / RESOLUTION)
# Get memory driver
mem_driver = gdal.GetDriverByName('MEM')
# Create grid
grid = mem_driver.Create('grid', sizex, sizey, 1, gdal.GDT_Float32)
# Setup projection and geo-transformation
grid.SetProjection(target_prj.ExportToWkt())
grid.SetGeoTransform(get_geo_t(extent, grid.RasterYSize, grid.RasterXSize))
# Perform the projection/resampling
gdal.ReprojectImage(
raw,
grid,
source_prj.ExportToWkt(),
target_prj.ExportToWkt(),
gdal.GRA_NearestNeighbour,
options=['NUM_THREADS=ALL_CPUS']
)
# Read grid data
array1 = grid.ReadAsArray()
# Mask fill values (i.e. invalid values)
    array1 = np.ma.masked_where(array1 == -1, array1, copy=False)
    # %% Calibration
array = array1 * scale + offset
grid.GetRasterBand(1).SetNoDataValue(-1)
grid.GetRasterBand(1).WriteArray(array)
# %% Plot the Data ========================================
# Create the basemap reference for the Rectangular Projection
plt.clf()
fig = plt.figure(frameon=False)
fig.set_size_inches(25. * array.shape[1] / array.shape[0], 25, forward=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
    # EPSG:3857 is Web Mercator (pseudo-Mercator)
bmap = Basemap(resolution='h', llcrnrlon=extent[0], llcrnrlat=extent[1],
urcrnrlon=extent[2], urcrnrlat=extent[3], epsg=3857)
# Converts a CPT file to be used in Python
cpt = load_cpt(canal.cptfile2)
# Makes a linear interpolation
cpt_convert = LinearSegmentedColormap('cpt', cpt)
# Plot the GOES-16 channel with the converted CPT colors
# (you may alter the min and max to match your preference)
if canal.visible:
bmap.imshow(
array,
origin='upper',
cmap='gray',
vmin=0.,
vmax=1.
)
else:
temp = array - 273.15
temp[temp > -5] = 50
bmap.imshow(
temp,
origin='upper',
cmap=cpt_convert,
vmin=-90,
vmax=50
)
    # save to PNG
    path_imagen = (f"{path_imagenes}/GOES16_"
                   f"{_metadatos['fecha_img'].strftime('%Y%m%dT%H%M%S')}Z_C{_metadatos['icanal']}.png")
plt.savefig(path_imagen, transparent=True)
plt.clf()
plt.close()
# Close file
raw = None
convertir_negro_transparente(path_imagen)
print('- finished! Time:', t.time() - start, 'seconds')
def geocolor(fecha: datetime.datetime, _canales: dict, | |
"""Unit tests for the ChangeMeta mutation."""
from __future__ import unicode_literals
from django.db import models
from nose import SkipTest
try:
# Django >= 1.11
from django.db.models import Index
except ImportError:
# Django <= 1.10
Index = None
from django_evolution.mutations import ChangeMeta
from django_evolution.support import supports_indexes, supports_index_together
from django_evolution.tests.base_test_case import EvolutionTestCase
class ChangeMetaPlainBaseModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class ChangeMetaIndexesBaseModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
if Index:
indexes = [
Index(fields=['int_field1']),
Index(fields=['char_field1', '-char_field2'],
name='my_custom_index'),
]
class ChangeMetaIndexTogetherBaseModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = [('int_field1', 'char_field1')]
class ChangeMetaUniqueTogetherBaseModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1')]
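# Illustrative note: each test class below evolves one of the base models above into a
# locally defined DestModel and asserts both the generated diff text and the serialized
# mutation, e.g.
#   ChangeMeta('TestModel', 'index_together', [('int_field1', 'char_field1')])
# records that only the Meta option changed while the fields stayed identical.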
class ChangeMetaIndexesTests(EvolutionTestCase):
"""Unit tests for ChangeMeta with indexes."""
sql_mapping_key = 'indexes'
DIFF_TEXT = (
"In model tests.TestModel:\n"
" Meta property 'indexes' has changed"
)
@classmethod
def setUpClass(cls):
super(ChangeMetaIndexesTests, cls).setUpClass()
if not supports_indexes:
raise SkipTest('Meta.indexes is not supported on this version '
'of Django')
def test_keeping_empty(self):
"""Testing ChangeMeta(indexes) and keeping list empty"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
indexes = []
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'indexes', []),
],
None,
None,
None,
expect_noop=True)
def test_setting_from_empty(self):
"""Testing ChangeMeta(indexes) and setting to valid list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
indexes = [
Index(fields=['int_field1']),
Index(fields=['char_field1', '-char_field2'],
name='my_custom_index'),
]
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta(
'TestModel',
'indexes',
[
{
'fields': ['int_field1'],
},
{
'fields': ['char_field1', '-char_field2'],
'name': 'my_custom_index',
},
])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'indexes',"
" [{'fields': ['int_field1']},"
" {'fields': ['char_field1', '-char_field2'],"
" 'name': 'my_custom_index'}])"
],
'setting_from_empty')
def test_replace_list(self):
"""Testing ChangeMeta(indexes) and replacing list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
indexes = [
Index(fields=['int_field2']),
]
self.set_base_model(ChangeMetaIndexesBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'indexes',
[{'fields': ['int_field2']}])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'indexes',"
" [{'fields': ['int_field2']}])"
],
'replace_list')
def test_append_list(self):
"""Testing ChangeMeta(indexes) and appending list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
indexes = [
Index(fields=['int_field1']),
Index(fields=['char_field1', '-char_field2'],
name='my_custom_index'),
Index(fields=['int_field2']),
]
self.set_base_model(ChangeMetaIndexesBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta(
'TestModel',
'indexes',
[
{
'fields': ['int_field1'],
},
{
'fields': ['char_field1', '-char_field2'],
'name': 'my_custom_index',
},
{
'fields': ['int_field2'],
},
])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'indexes',"
" [{'fields': ['int_field1']},"
" {'fields': ['char_field1', '-char_field2'],"
" 'name': 'my_custom_index'},"
" {'fields': ['int_field2']}])"
],
'append_list')
def test_removing(self):
"""Testing ChangeMeta(indexes) and removing property"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
self.set_base_model(ChangeMetaIndexesBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'indexes', [])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'indexes', [])"
],
'removing')
def test_missing_indexes(self):
"""Testing ChangeMeta(indexes) and old missing indexes"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
indexes = [
Index(fields=['int_field2']),
]
self.set_base_model(ChangeMetaIndexesBaseModel)
# Remove the indexes from the database state, to simulate the indexes
# not being found in the database. The evolution should still work.
self.database_state.clear_indexes('tests_testmodel')
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'indexes',
[{'fields': ['int_field2']}])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'indexes',"
" [{'fields': ['int_field2']}])"
],
'ignore_missing_indexes',
rescan_indexes=False)
class ChangeMetaIndexTogetherTests(EvolutionTestCase):
"""Unit tests for ChangeMeta with index_together."""
sql_mapping_key = 'index_together'
DIFF_TEXT = (
"In model tests.TestModel:\n"
" Meta property 'index_together' has changed"
)
@classmethod
def setUpClass(cls):
super(ChangeMetaIndexTogetherTests, cls).setUpClass()
if not supports_index_together:
raise SkipTest('Meta.index_together is not supported on this '
'version of Django')
def test_keeping_empty(self):
"""Testing ChangeMeta(index_together) and keeping list empty"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = []
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together', []),
],
None,
None,
None,
expect_noop=True)
def test_setting_from_empty(self):
"""Testing ChangeMeta(index_together) and setting to valid list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = [('int_field1', 'char_field1')]
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together',
[('int_field1', 'char_field1')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'index_together',"
" [('int_field1', 'char_field1')])"
],
'setting_from_empty')
def test_replace_list(self):
"""Testing ChangeMeta(index_together) and replacing list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = [('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaIndexTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together',
[('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'index_together',"
" [('int_field2', 'char_field2')])"
],
'replace_list')
def test_append_list(self):
"""Testing ChangeMeta(index_together) and appending list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = [('int_field1', 'char_field1'),
('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaIndexTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together',
[('int_field1', 'char_field1'),
('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'index_together',"
" [('int_field1', 'char_field1'),"
" ('int_field2', 'char_field2')])"
],
'append_list')
def test_removing(self):
"""Testing ChangeMeta(index_together) and removing property"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
self.set_base_model(ChangeMetaIndexTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together', [])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'index_together', [])"
],
'removing')
def test_missing_indexes(self):
"""Testing ChangeMeta(index_together) and old missing indexes"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
index_together = [('char_field1', 'char_field2')]
self.set_base_model(ChangeMetaIndexTogetherBaseModel)
# Remove the indexes from the database state, to simulate the indexes
# not being found in the database. The evolution should still work.
self.database_state.clear_indexes('tests_testmodel')
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'index_together',
[('char_field1', 'char_field2')])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'index_together',"
" [('char_field1', 'char_field2')])"
],
'ignore_missing_indexes',
rescan_indexes=False)
class ChangeMetaUniqueTogetherTests(EvolutionTestCase):
"""Unit tests for ChangeMeta with unique_together."""
sql_mapping_key = 'unique_together'
DIFF_TEXT = (
"In model tests.TestModel:\n"
" Meta property 'unique_together' has changed"
)
def test_keeping_empty(self):
"""Testing ChangeMeta(unique_together) and keeping list empty"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = []
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together', []),
],
None,
None,
None,
expect_noop=True)
def test_setting_from_empty(self):
"""Testing ChangeMeta(unique_together) and setting to valid list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1')]
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1')])"
],
'setting_from_empty')
def test_replace_list(self):
"""Testing ChangeMeta(unique_together) and replacing list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field2', 'char_field2')])"
],
'replace_list')
def test_append_list(self):
"""Testing ChangeMeta(unique_together) and appending list"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1'),
('int_field2', 'char_field2')]
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1'),
('int_field2', 'char_field2')]),
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1'),"
" ('int_field2', 'char_field2')])"
],
'append_list')
def test_removing(self):
"""Testing ChangeMeta(unique_together) and removing property"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together', [])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together', [])"
],
'removing')
def test_set_remove(self):
"""Testing ChangeMeta(unique_together) and setting indexes and removing
one
"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1')]
self.set_base_model(ChangeMetaPlainBaseModel)
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1'),
('int_field2', 'char_field2')]),
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1')])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1')])"
],
'set_remove')
def test_missing_indexes(self):
"""Testing ChangeMeta(unique_together) and old missing indexes"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('char_field1', 'char_field2')]
self.set_base_model(ChangeMetaUniqueTogetherBaseModel)
# Remove the indexes from the database state, to simulate the indexes
# not being found in the database. The evolution should still work.
self.database_state.clear_indexes('tests_testmodel')
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('char_field1', 'char_field2')])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('char_field1', 'char_field2')])"
],
'ignore_missing_indexes',
rescan_indexes=False)
def test_upgrade_from_v1_sig_no_indexes(self):
"""Testing ChangeMeta(unique_together) and upgrade from v1 signature
with no changes and no indexes in database"""
class DestModel(models.Model):
int_field1 = models.IntegerField()
int_field2 = models.IntegerField()
char_field1 = models.CharField(max_length=20)
char_field2 = models.CharField(max_length=40)
class Meta:
unique_together = [('int_field1', 'char_field1')]
self.set_base_model(ChangeMetaPlainBaseModel)
# Pretend this is an older signature with the same unique_together.
model_sig = (
self.start_sig
.get_app_sig('tests')
.get_model_sig('TestModel')
)
model_sig.unique_together = DestModel._meta.unique_together
model_sig._unique_together_applied = False
self.perform_evolution_tests(
DestModel,
[
ChangeMeta('TestModel', 'unique_together',
[('int_field1', 'char_field1')])
],
self.DIFF_TEXT,
[
"ChangeMeta('TestModel', 'unique_together',"
" [('int_field1', 'char_field1')])"
],
'upgrade_from_v1_sig',
| |
<reponame>WojciechKusa/datasets
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Switchboard Dialog Act Corpus
The Switchboard Dialog Act Corpus (SwDA) extends the Switchboard-1 Telephone Speech Corpus, Release 2,
with turn/utterance-level dialog-act tags. The tags summarize syntactic, semantic, and pragmatic information
about the associated turn. The SwDA project was undertaken at UC Boulder in the late 1990s.
This script is a modified version of the original swda.py (https://github.com/cgpotts/swda/blob/master/swda.py) from
the original corpus repo. Modifications were made to accommodate the HuggingFace Datasets project format.
"""
import csv
import datetime
import glob
import io
import os
import re
import datasets
# Citation as described here: https://github.com/cgpotts/swda#citation.
_CITATION = """\
@techreport{Jurafsky-etal:1997,
Address = {Boulder, CO},
Author = {<NAME> and <NAME> and <NAME>},
Institution = {University of Colorado, Boulder Institute of Cognitive Science},
Number = {97-02},
Title = {Switchboard {SWBD}-{DAMSL} Shallow-Discourse-Function Annotation Coders Manual, Draft 13},
Year = {1997}}
@article{Shriberg-etal:1998,
Author = {<NAME>, <NAME> and <NAME> and <NAME> and <NAME>},
Journal = {Language and Speech},
Number = {3--4},
Pages = {439--487},
Title = {Can Prosody Aid the Automatic Classification of Dialog Acts in Conversational Speech?},
Volume = {41},
Year = {1998}}
@article{Stolcke-etal:2000,
Author = {<NAME> <NAME> <NAME> <NAME>},
Journal = {Computational Linguistics},
Number = {3},
Pages = {339--371},
Title = {Dialogue Act Modeling for Automatic Tagging and Recognition of Conversational Speech},
Volume = {26},
Year = {2000}}
"""
# Description of dataset gathered from: https://github.com/cgpotts/swda#overview.
_DESCRIPTION = """\
The Switchboard Dialog Act Corpus (SwDA) extends the Switchboard-1 Telephone Speech Corpus, Release 2 with
turn/utterance-level dialog-act tags. The tags summarize syntactic, semantic, and pragmatic information about the
associated turn. The SwDA project was undertaken at UC Boulder in the late 1990s.
The SwDA is not inherently linked to the Penn Treebank 3 parses of Switchboard, and it is far from straightforward to
align the two resources. In addition, the SwDA is not distributed with the Switchboard's tables of metadata about the
conversations and their participants.
"""
# Homepage gathered from: https://github.com/cgpotts/swda#overview.
_HOMEPAGE = "http://compprag.christopherpotts.net/swda.html"
# More details about the license: https://creativecommons.org/licenses/by-nc-sa/3.0/.
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License"
# Dataset main url.
_URL = "https://github.com/cgpotts/swda/raw/master/swda.zip"
# Dialogue act tags - long version 217 dialogue acts labels.
_ACT_TAGS = [
"b^m^r",
"qw^r^t",
"aa^h",
"br^m",
"fa^r",
"aa,ar",
"sd^e(^q)^r",
"^2",
"sd;qy^d",
"oo",
"bk^m",
"aa^t",
"cc^t",
"qy^d^c",
"qo^t",
"ng^m",
"qw^h",
"qo^r",
"aa",
"qy^d^t",
"qrr^d",
"br^r",
"fx",
"sd,qy^g",
"ny^e",
"^h^t",
"fc^m",
"qw(^q)",
"co",
"o^t",
"b^m^t",
"qr^d",
"qw^g",
"ad(^q)",
"qy(^q)",
"na^r",
"am^r",
"qr^t",
"ad^c",
"qw^c",
"bh^r",
"h^t",
"ft^m",
"ba^r",
"qw^d^t",
"%",
"t3",
"nn",
"bd",
"h^m",
"h^r",
"sd^r",
"qh^m",
"^q^t",
"sv^2",
"ft",
"ar^m",
"qy^h",
"sd^e^m",
"qh^r",
"cc",
"fp^m",
"ad",
"qo",
"na^m^t",
"fo^c",
"qy",
"sv^e^r",
"aap",
"no",
"aa^2",
"sv(^q)",
"sv^e",
"nd",
'"',
"bf^2",
"bk",
"fp",
"nn^r^t",
"fa^c",
"ny^t",
"ny^c^r",
"qw",
"qy^t",
"b",
"fo",
"qw^r",
"am",
"bf^t",
"^2^t",
"b^2",
"x",
"fc",
"qr",
"no^t",
"bk^t",
"bd^r",
"bf",
"^2^g",
"qh^c",
"ny^c",
"sd^e^r",
"br",
"fe",
"by",
"^2^r",
"fc^r",
"b^m",
"sd,sv",
"fa^t",
"sv^m",
"qrr",
"^h^r",
"na",
"fp^r",
"o",
"h,sd",
"t1^t",
"nn^r",
"cc^r",
"sv^c",
"co^t",
"qy^r",
"sv^r",
"qy^d^h",
"sd",
"nn^e",
"ny^r",
"b^t",
"ba^m",
"ar",
"bf^r",
"sv",
"bh^m",
"qy^g^t",
"qo^d^c",
"qo^d",
"nd^t",
"aa^r",
"sd^2",
"sv;sd",
"qy^c^r",
"qw^m",
"qy^g^r",
"no^r",
"qh(^q)",
"sd;sv",
"bf(^q)",
"+",
"qy^2",
"qw^d",
"qy^g",
"qh^g",
"nn^t",
"ad^r",
"oo^t",
"co^c",
"ng",
"^q",
"qw^d^c",
"qrr^t",
"^h",
"aap^r",
"bc^r",
"sd^m",
"bk^r",
"qy^g^c",
"qr(^q)",
"ng^t",
"arp",
"h",
"bh",
"sd^c",
"^g",
"o^r",
"qy^c",
"sd^e",
"fw",
"ar^r",
"qy^m",
"bc",
"sv^t",
"aap^m",
"sd;no",
"ng^r",
"bf^g",
"sd^e^t",
"o^c",
"b^r",
"b^m^g",
"ba",
"t1",
"qy^d(^q)",
"nn^m",
"ny",
"ba,fe",
"aa^m",
"qh",
"na^m",
"oo(^q)",
"qw^t",
"na^t",
"qh^h",
"qy^d^m",
"ny^m",
"fa",
"qy^d",
"fc^t",
"sd(^q)",
"qy^d^r",
"bf^m",
"sd(^q)^t",
"ft^t",
"^q^r",
"sd^t",
"sd(^q)^r",
"ad^t",
]
# Damsl dialogue act tags version - short version 43 dialogue acts labels.
_DAMSL_ACT_TAGS = [
"ad",
"qo",
"qy",
"arp_nd",
"sd",
"h",
"bh",
"no",
"^2",
"^g",
"ar",
"aa",
"sv",
"bk",
"fp",
"qw",
"b",
"ba",
"t1",
"oo_co_cc",
"+",
"ny",
"qw^d",
"x",
"qh",
"fc",
'fo_o_fw_"_by_bc',
"aap_am",
"%",
"bf",
"t3",
"nn",
"bd",
"ng",
"^q",
"br",
"qy^d",
"fa",
"^h",
"b^m",
"ft",
"qrr",
"na",
]
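# Illustrative note: both tag inventories are exposed as ClassLabel features in `_info()`
# below, so tags can be converted between strings and integer ids, e.g.
#   damsl = datasets.ClassLabel(num_classes=43, names=_DAMSL_ACT_TAGS)
#   damsl.str2int("sd")  # -> 4
#   damsl.int2str(4)     # -> "sd"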
class Swda(datasets.GeneratorBasedBuilder):
"""
This is the HuggingFace Dataset class for swda.
Switchboard Dialog Act Corpus Hugging Face Dataset class.
The Switchboard Dialog Act Corpus (SwDA) extends the Switchboard-1 Telephone Speech Corpus, Release 2,
with turn/utterance-level dialog-act tags. The tags summarize syntactic, semantic, and pragmatic information
about the associated turn. The SwDA project was undertaken at UC Boulder in the late 1990s.
"""
# Urls for each split train-dev-test.
_URLS = {
"train": "https://github.com/NathanDuran/Probabilistic-RNN-DA-Classifier/raw/master/data/train_split.txt",
"dev": "https://github.com/NathanDuran/Probabilistic-RNN-DA-Classifier/raw/master/data/dev_split.txt",
"test": "https://github.com/NathanDuran/Probabilistic-RNN-DA-Classifier/raw/master/data/test_split.txt",
}
def _info(self):
"""
Specify the datasets.DatasetInfo object which contains information and typings for the dataset.
"""
return datasets.DatasetInfo(
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# This defines the different columns of the dataset and their types.
features=datasets.Features(
{
"swda_filename": datasets.Value("string"),
"ptb_basename": datasets.Value("string"),
"conversation_no": datasets.Value("int64"),
"transcript_index": datasets.Value("int64"),
"act_tag": datasets.ClassLabel(num_classes=217, names=_ACT_TAGS),
"damsl_act_tag": datasets.ClassLabel(num_classes=43, names=_DAMSL_ACT_TAGS),
"caller": datasets.Value("string"),
"utterance_index": datasets.Value("int64"),
"subutterance_index": datasets.Value("int64"),
"text": datasets.Value("string"),
"pos": datasets.Value("string"),
"trees": datasets.Value("string"),
"ptb_treenumbers": datasets.Value("string"),
"talk_day": datasets.Value("string"),
"length": datasets.Value("int64"),
"topic_description": datasets.Value("string"),
"prompt": datasets.Value("string"),
"from_caller": datasets.Value("int64"),
"from_caller_sex": datasets.Value("string"),
"from_caller_education": datasets.Value("int64"),
"from_caller_birth_year": datasets.Value("int64"),
"from_caller_dialect_area": datasets.Value("string"),
"to_caller": datasets.Value("int64"),
"to_caller_sex": datasets.Value("string"),
"to_caller_education": datasets.Value("int64"),
"to_caller_birth_year": datasets.Value("int64"),
"to_caller_dialect_area": datasets.Value("string"),
}
),
supervised_keys=None,
# Homepage of the dataset for documentation
homepage=_HOMEPAGE,
# License for the dataset if available
license=_LICENSE,
# Citation for the dataset
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""
Returns SplitGenerators.
This method is tasked with downloading/extracting the data and defining the splits.
Args:
dl_manager (:obj:`datasets.utils.download_manager.DownloadManager`):
Download manager to download and extract data files from urls.
Returns:
:obj:`list[str]`:
List of paths to data.
"""
# Download extract and return path of data file.
dl_dir = dl_manager.download_and_extract(_URL)
# Use swda/ folder.
data_dir = os.path.join(dl_dir, "swda")
# Handle partitions files.
urls_to_download = self._URLS
# Download extract and return paths of split files.
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["train"]}
),
# Return whole data path and dev splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.VALIDATION,
gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["dev"]},
),
# Return whole data path and train splits file downloaded path.
datasets.SplitGenerator(
name=datasets.Split.TEST, gen_kwargs={"data_dir": data_dir, "split_file": downloaded_files["test"]}
),
]
def _generate_examples(self, data_dir, split_file):
"""
Yields examples.
This method will receive as arguments the `gen_kwargs` defined in the previous `_split_generators` method.
It is in charge of opening the given file and yielding (key, example) tuples from the dataset
The key is not important, it's more here for legacy reason (legacy from tfds).
Args:
data_dir (:obj:`str`):
Path where is downloaded dataset.
split_file (:obj:`str`):
Path of split file used for train-dev-test.
        Yields:
            :obj:`tuple[str, dict]`:
                Example id and example dictionary with the features declared in `_info`.
"""
# Read in the split file.
split_file = io.open(file=split_file, mode="r", encoding="utf-8").read().splitlines()
# Read in corpus data using split files.
corpus = CorpusReader(src_dirname=data_dir, split_file=split_file)
# Generate examples.
for i_trans, trans in enumerate(corpus.iter_transcripts()):
for i_utt, utt in enumerate(trans.utterances):
id_ = str(i_trans) + ":" + str(i_utt)
yield id_, {feature: utt[feature] for feature in self.info.features.keys()}
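# Hedged usage sketch: once this script is resolved by the `datasets` library, loading it
# would look roughly like the following; the registry name "swda" is an assumption based
# on the class name, and a local path to this script would also work.
#   from datasets import load_dataset
#   ds = load_dataset("swda")
#   print(ds["train"][0]["damsl_act_tag"])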
class CorpusReader:
"""Class for reading in the corpus and iterating through its values."""
def __init__(self, src_dirname, split_file=None):
"""
Reads in the data from `src_dirname` (should be the root of the
corpus). Assumes that the metadata file `swda-metadata.csv` is
in the main directory of the corpus, using that file to build
the `Metadata` object used throughout.
Args:
src_dirname (:obj:`str`):
Path where swda folder with all data.
            split_file (:obj:`list[str]`, `optional`):
                List of file names used for a split (train, dev or test). Optional; defaults to None.
"""
self.src_dirname = src_dirname
metadata_filename = os.path.join(src_dirname, "swda-metadata.csv")
self.metadata = Metadata(metadata_filename)
self.split_file = split_file
def iter_transcripts(
self,
):
"""
Iterate through the transcripts.
Returns:
:obj:`Transcript`:
Transcript instance.
"""
# All files names.
filenames = glob.glob(os.path.join(self.src_dirname, | |
etc). This
parameter only makes sense if the input comes in the form of molecular
dynamics trajectories or data, and will otherwise create a warning and
have no effect.
top : str, optional, default = None
A topology file name. This is needed when molecular dynamics
trajectories are given and no featurizer is given.
In this case, only the Cartesian coordinates will be read.
chunk_size: int, optional, default = 100 for file readers and 5000 for
already loaded data The chunk size at which the input file is being
processed.
Returns
-------
reader obj: type depends on input data
1. :class:`FeatureReader <pyemma.coordinates.data.feature_reader.FeatureReader>` for MD-data
2. :class:`NumPyFileReader <pyemma.coordinates.data.numpy_filereader.NumPyFileReader>` for .npy files
3. :class:`PyCSVReader <pyemma.coordinates.data.py_csv_reader.PyCSVReader>` for csv files.
4. :class:`DataInMemory <pyemma.coordinates.data.data_in_memory.DataInMemory>` for already loaded data (e.g NumPy arrays)
See also
--------
:func:`pyemma.coordinates.pipeline`
The data input is the first stage for your pipeline. Add other stages
to it and build a pipeline to analyze big data in streaming mode.
Examples
--------
Create a reader for NumPy files:
>>> import numpy as np
>>> from pyemma.coordinates import source
    >>> reader = source(['001.npy', '002.npy']) # doctest: +SKIP
Create a reader for trajectory files and select some distance as feature:
>>> reader = source(['traj01.xtc', 'traj02.xtc'], top='my_structure.pdb') # doctest: +SKIP
>>> reader.featurizer.add_distances([[0, 1], [5, 6]]) # doctest: +SKIP
>>> calculated_features = reader.get_output() # doctest: +SKIP
create a reader for a csv file:
>>> reader = source('data.csv') # doctest: +SKIP
Create a reader for huge NumPy in-memory arrays to process them in
huge chunks to avoid memory issues:
>>> data = np.random.random(int(1e7))
>>> reader = source(data, chunk_size=5000)
>>> from pyemma.coordinates import cluster_regspace
>>> regspace = cluster_regspace(reader, dmin=0.1)
"""
# CASE 1: input is a string or list of strings
# check: if single string create a one-element list
if isinstance(inp, string_types) or (isinstance(inp, (list, tuple))
            and (any(isinstance(item, string_types) for item in inp) or len(inp) == 0)):
reader = _create_file_reader(inp, top, features, chunk_size=chunk_size if chunk_size else 100)
elif isinstance(inp, _np.ndarray) or (isinstance(inp, (list, tuple))
            and (any(isinstance(item, _np.ndarray) for item in inp) or len(inp) == 0)):
# CASE 2: input is a (T, N, 3) array or list of (T_i, N, 3) arrays
        # check: if a single array is given, create a one-element list
        # check: do all arrays have compatible dimensions (*, N, 3) or (*, N)? If not: raise ValueError.
# create MemoryReader
reader = _DataInMemory(inp, chunksize=chunk_size if chunk_size else 5000)
else:
raise ValueError('unsupported type (%s) of input' % type(inp))
return reader
def pipeline(stages, run=True, stride=1, chunksize=100):
r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
If this function takes too long, consider loading data in memory.
    Alternatively, if the data is too large to be loaded into memory, make use
of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed
by :py:func:`source`. If a list of pipelining stages are given, the
first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given
stages. If only an input stage is given, the run flag has no effect at
this time. True also means that the pipeline will be immediately
re-parametrized when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any
computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to
parametrize its stages. Note that this could cause the parametrization
step to be very slow for large data sets. Since molecular dynamics data
is usually correlated at short timescales, it is often sufficient to
parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
    chunksize : int, optional, default = 100
how many datapoints to process as a batch at one step
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with
limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes:
"""
if not isinstance(stages, list):
stages = [stages]
p = _Pipeline(stages, param_stride=stride, chunksize=chunksize)
if run:
p.parametrize()
return p
def discretizer(reader,
transform=None,
cluster=None,
run=True,
stride=1,
chunksize=100):
r""" Specialized pipeline: From trajectories to clustering.
Constructs a pipeline that consists of three stages:
1. an input stage (mandatory)
2. a transformer stage (optional)
3. a clustering stage (mandatory)
This function is identical to calling :func:`pipeline` with the three
stages, it is only meant as a guidance for the (probably) most common
usage cases of a pipeline.
Parameters
----------
reader : instance of :class:`pyemma.coordinates.data.reader.ChunkedReader`
The reader instance provides access to the data. If you are working
with MD data, you most likely want to use a FeatureReader.
transform : instance of :class: `pyemma.coordinates.Transformer`
an optional transform like PCA/TICA etc.
cluster : instance of :class: `pyemma.coordinates.AbstractClustering`
clustering Transformer (optional) a cluster algorithm to assign
transformed data to discrete states.
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline
to parametrize its stages. Note that this could cause the
parametrization step to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales,
it is often sufficient to parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
    chunksize : int, optional, default = 100
how many datapoints to process as a batch at one step
Returns
-------
pipe : a :class:`Pipeline <pyemma.coordinates.pipelines.Discretizer>` object
A pipeline object that is able to streamline data analysis of large
amounts of input data with limited memory in streaming mode.
Examples
--------
Construct a discretizer pipeline processing all data
with a PCA transformation and cluster the principal components
with uniform time clustering:
>>> import numpy as np
>>> from pyemma.coordinates import source, pca, cluster_regspace, discretizer
>>> from pyemma.datasets import get_bpti_test_data
>>> reader = source(get_bpti_test_data()['trajs'], top=get_bpti_test_data()['top'])
>>> transform = pca(dim=2)
>>> cluster = cluster_regspace(dmin=0.1)
>>> disc = discretizer(reader, transform, cluster)
Finally you want to run the pipeline:
>>> disc.parametrize()
    Access the discrete trajectories and save them to files:
>>> disc.dtrajs # doctest: +ELLIPSIS
[array([...
This will store the discrete trajectory to "traj01.dtraj":
>>> from pyemma.util.files import TemporaryDirectory
>>> import os
>>> with TemporaryDirectory('dtrajs') as tmpdir:
... disc.save_dtrajs(output_dir=tmpdir)
... sorted(os.listdir(tmpdir))
['bpti_001-033.dtraj', 'bpti_034-066.dtraj', 'bpti_067-100.dtraj']
"""
if cluster is None:
_logger.warning('You did not specify a cluster algorithm.'
' Defaulting to kmeans(k=100)')
cluster = _KmeansClustering(n_clusters=100)
disc = _Discretizer(reader, transform, cluster, param_stride=stride)
if run:
disc.parametrize()
return disc
def save_traj(traj_inp, indexes, outfile, top=None, stride=1, chunksize=1000, verbose=False):
r""" Saves a sequence of frames as a single trajectory.
Extracts the specified sequence of time/trajectory indexes from traj_inp
and saves it to one single molecular dynamics trajectory file. The output
format will be determined by the outfile name.
Parameters
----------
traj_inp :
traj_inp can be of two types.
1. a python list of strings containing the filenames associated with
the indices in :py:obj:`indexes`. With | |
0.0225
o -ne-sx 79.7 108.92 SOURCE3 1
o -ne-sy 86.1 111.34 SOURCE3 1
p2-ne-pe 110.5 116.81 SOURCE3 1
p2-ne-px 105.8 128.35 SOURCE3 1
p2-ne-py 111.5 123.47 SOURCE3 1
p2-ne-sx 83.9 112.12 SOURCE3 1
p2-ne-sy 87.7 115.73 SOURCE3 1
pe-ne-s 87.0 115.73 SOURCE3 1
px-ne-s 81.8 131.84 SOURCE3 1
py-ne-s 90.3 116.18 SOURCE3 4 3.7135
s -ne-s 70.5 120.87 SOURCE3 1
s -ne-sx 65.4 112.96 SOURCE3 1
s -ne-sy 67.7 119.63 SOURCE3 1
c1-nf-ca 62.6 151.95 CORR_SOURCE5 15 1.4352
c1-nf-ch 67.9 140.00 SOURCE2 1
c2-nf-ca 68.5 120.83 CORR_SOURCE5 103 1.9474
c2-nf-cf 70.3 116.01 SOURCE3_SOURCE5 31 2.1630
c2-nf-n2 93.6 113.31 SOURCE3 1
c2-nf-nf 89.0 110.86 SOURCE3 7
c2-nf-p2 84.1 134.03 SOURCE3 1
c2-nf-pf 82.5 120.52 SOURCE3 8
c2-nf-px 83.9 117.75 SOURCE3 5
c2-nf-py 88.2 117.04 SOURCE3 3
c2-nf-sx 62.7 111.98 SOURCE3 3
c2-nf-sy 65.6 120.60 CORR_SOURCE5 19 1.1215
ca-nf-ce 68.1 121.71 CORR_SOURCE5 29 1.8572
ca-nf-n2 88.7 114.35 CORR_SOURCE5 15 1.3133
ca-nf-ne 88.6 115.17 CORR_SOURCE5 98 0.8636
ca-nf-o 89.2 115.69 SOURCE3_SOURCE5 15 1.8257
ca-nf-p2 87.2 118.09 SOURCE3 1
ca-nf-s 68.1 120.11 SOURCE3 1
c -nf-c2 69.7 118.53 CORR 6
cf-nf-n2 90.5 111.19 SOURCE3 1
cf-nf-o 91.2 112.16 SOURCE3 1
cf-nf-p2 87.9 117.02 SOURCE3 1
cf-nf-s 69.4 116.28 SOURCE3 1
ch-nf-n1 90.2 120.20 SOURCE2 1
ch-nf-n2 92.3 113.39 SOURCE3 1
ch-nf-o 93.0 114.70 SOURCE2 1
ch-nf-p2 88.4 119.57 SOURCE3 1
ch-nf-s 70.2 117.70 SOURCE3 1
f -n -f 116.1 102.98 SOURCE3 1
n2-nf-n2 121.5 107.22 SOURCE3 1
n2-nf-nf 112.2 110.72 SOURCE3 9
n2-nf-o 119.6 114.10 SOURCE3 1
n2-nf-p2 116.9 109.66 SOURCE3 1
n2-nf-pf 107.4 112.15 SOURCE3 7
n2-nf-px 106.1 115.97 SOURCE3 3
n2-nf-py 112.0 114.60 SOURCE3 3
n2-nf-s 89.7 115.90 SOURCE3 1
n2-nf-sx 80.2 107.29 SOURCE3 1
n2-nf-sy 85.7 111.21 SOURCE3 1
nf-nf-o 113.5 110.45 SOURCE3 10
nf-nf-p2 110.5 114.39 SOURCE3 6
nf-nf-s 86.4 115.95 SOURCE3 6
o -nf-o 116.7 124.09 SOURCE3 2
o -nf-pf 99.2 132.32 SOURCE3 11
o -nf-px 109.1 110.62 SOURCE3 1
o -nf-py 114.7 110.79 SOURCE3 4
o -nf-s 89.9 117.19 SOURCE3 2
o -nf-sx 79.7 108.92 SOURCE3 1
o -nf-sy 86.1 111.34 SOURCE3 1
p2-nf-pf 110.5 116.81 SOURCE3 1
p2-nf-px 105.8 128.35 SOURCE3 1
p2-nf-py 111.5 123.47 SOURCE3 1
p2-nf-sx 83.9 112.12 SOURCE3 1
p2-nf-sy 87.7 115.73 SOURCE3 1
pf-nf-s 87.0 115.73 SOURCE3 1
px-nf-s 81.8 131.84 SOURCE3 1
py-nf-s 90.3 116.18 SOURCE3 4
s -nf-s 70.5 120.87 SOURCE3 1
s -nf-sx 65.4 112.96 SOURCE3 1
s -nf-sy 67.7 119.63 SOURCE3 1
br-nh-br 67.7 106.27 SOURCE3 1
br-nh-ca 63.1 111.88 SOURCE3 1
br-nh-hn 42.0 101.56 SOURCE3 1
c1-nh-c1 70.3 116.98 SOURCE3 1
c1-nh-c2 67.3 123.35 SOURCE4_SOURCE5 17 1.3108
c1-nh-ca 67.6 122.36 SOURCE3 3 1.2016
c1-nh-hn 49.9 117.40 SOURCE4_SOURCE5 22 0.6517
c2-nh-c2 65.8 124.73 SOURCE4_SOURCE5 107 1.4158
c2-nh-c3 64.2 123.71 SOURCE3 8 3.5348
c2-nh-ca 65.1 127.56 SOURCE4_SOURCE5 258 2.3985
c2-nh-cc 65.7 126.35 CORR_SOURCE5 14 0.8394
c2-nh-cd 65.7 126.35 CORR_SOURCE5 14 0.8394
c2-nh-cx 64.8 124.39 5/2017 3 1.3163
c2-nh-hn 49.0 115.09 SOURCE4_SOURCE5 2743 1.5424
c2-nh-n2 85.0 120.22 SOURCE4_SOURCE5 101 1.0922
c2-nh-n3 84.3 116.87 SOURCE4_SOURCE5 35 1.4173
c2-nh-no 82.2 125.62 SOURCE4_SOURCE5 19 0.8850
c2-nh-oh 86.0 112.18 SOURCE4_SOURCE5 38 1.3409
c2-nh-os 85.7 112.95 SOURCE4_SOURCE5 14 0.4455
c2-nh-sy 63.2 121.13 SOURCE4_SOURCE5 20 0.5133
c3-nh-c3 65.1 114.51 SOURCE4_SOURCE5 1386 2.1206
c3-nh-ca 65.2 119.98 SOURCE3_SOURCE5 1640 2.1716
c3-nh-cc 65.6 119.72 CORR_SOURCE5 638 2.4802
c3-nh-cd 65.6 119.72 CORR_SOURCE5 638 2.4802
c3-nh-cf 65.1 120.12 SOURCE4_SOURCE5 52 2.0459
c3-nh-cz 64.7 125.46 SOURCE4_SOURCE5 25 0.5651
c3-nh-hn 46.4 115.99 SOURCE3_SOURCE5 1206 1.7716
c3-nh-n2 85.3 112.35 SOURCE3 9 4.0058
c3-nh-n 84.4 111.27 SOURCE4_SOURCE5 20 2.2657
c3-nh-na 84.0 112.39 SOURCE4_SOURCE5 18 1.3421
c3-nh-p2 80.3 123.35 SOURCE3 1
c3-nh-sy 63.5 116.32 SOURCE4_SOURCE5 31 1.3018
ca-nh-ca 65.2 127.46 SOURCE3 2 0.0002
ca-nh-cc 64.9 129.80 CORR_SOURCE5 49 1.2126
ca-nh-cd 64.9 129.80 CORR_SOURCE5 49 1.2126
ca-nh-cl 71.4 113.15 SOURCE3 1
ca-nh-cx 64.9 124.23 5/2017 10 0.2451
ca-nh-f 89.4 106.09 SOURCE3 3 1.0660
ca-nh-hn 48.8 116.07 SOURCE4_SOURCE5 5026 1.3182
ca-nh-i 58.9 117.83 SOURCE3 1
ca-nh-n1 86.5 117.13 HF/6-31G* 1
ca-nh-n2 84.7 121.13 SOURCE4_SOURCE5 61 1.2262
ca-nh-n3 84.0 117.83 SOURCE3_SOURCE5 31 1.9504
ca-nh-n4 85.7 108.94 SOURCE3 5 0.6562
ca-nh-n 85.1 116.03 SOURCE4_SOURCE5 31 1.0216
ca-nh-na 85.1 115.96 SOURCE3_SOURCE5 14 0.6985
ca-nh-nh 85.5 114.84 SOURCE3_SOURCE5 14 1.2270
ca-nh-no 86.3 113.92 SOURCE3 4 2.9561
ca-nh-o 87.0 121.92 SOURCE3 2 3.9630
ca-nh-oh 85.8 112.97 SOURCE3_SOURCE5 7 0.3980
ca-nh-os 86.2 111.85 SOURCE3_SOURCE5 8 0.6032
ca-nh-p2 81.0 125.27 SOURCE3 8 5.1798
ca-nh-p3 79.1 125.70 SOURCE3 3 5.7796
ca-nh-p4 80.5 124.01 SOURCE3 3 2.5810
ca-nh-p5 80.4 128.17 SOURCE3_SOURCE5 9 0.9847
ca-nh-s4 63.7 115.62 SOURCE3 3 0.3434
ca-nh-s6 63.2 122.85 SOURCE4_SOURCE5 92 2.1278
ca-nh-s 60.9 122.54 SOURCE3 3 2.7001
ca-nh-sh 63.3 121.41 SOURCE3 1
ca-nh-ss 63.2 121.50 SOURCE3 3 2.6255
ca-nh-sy 62.2 125.23 SOURCE4_SOURCE5 116 1.6241
cc-nh-cx 64.3 127.53 5/2017 2 0.0096
cc-nh-hn 49.3 115.63 SOURCE3_SOURCE5 1084 1.8598
cc-nh-n2 85.5 120.09 SOURCE4_SOURCE5 21 1.0306
cc-nh-sy 63.0 122.52 SOURCE4_SOURCE5 60 1.2839
cd-nh-cx 64.7 123.70 CORR_SOURCE5 82 1.6057
cd-nh-hn 49.3 115.63 SOURCE3_SOURCE5 1084 1.8598
ce-nh-hn 48.7 115.68 CORR_SOURCE5 360 1.2286
ce-nh-o 84.2 129.43 CORR 2
ce-nh-sy 65.3 113.39 SOURCE4_SOURCE5 15 1.0862
cf-nh-hn 48.7 115.68 CORR_SOURCE5 360 1.2286
cf-nh-o 84.2 129.43 CORR 2
cl-nh-cl 81.7 106.60 SOURCE3 1
cl-nh-hn 48.7 104.14 SOURCE3 1
cx-nh-cx 89.0 62.01 SOURCE4_SOURCE5 98 0.5911
cx-nh-hn 46.7 118.88 5/2017 15 0.2217
cz-nh-hn 49.2 121.15 SOURCE4_SOURCE5 116 0.7805
f -nh-f 114.4 101.70 SOURCE3 1
f -nh-hn 64.7 101.23 SOURCE3 1
hn-nh-hn 39.5 115.12 SOURCE4_SOURCE5 3024 2.1393
hn-nh-i 37.9 107.57 SOURCE3 1
hn-nh-n1 64.4 110.57 HF/6-31G* 1
hn-nh-n2 61.9 118.14 SOURCE4_SOURCE5 220 2.1956
hn-nh-n3 60.5 113.97 SOURCE3_SOURCE5 53 1.8422
hn-nh-n4 61.2 104.40 SOURCE3 3 0.5056
hn-nh-n 62.7 108.17 SOURCE4_SOURCE5 39 1.1076
hn-nh-na 62.7 108.24 SOURCE3_SOURCE5 48 1.3913
hn-nh-nh 61.9 110.86 SOURCE4_SOURCE5 20 1.2814
hn-nh-no 62.8 109.94 SOURCE4_SOURCE5 17 0.1843
hn-nh-o 65.9 116.45 SOURCE3 2 0.6063
hn-nh-oh 62.6 106.49 SOURCE4_SOURCE5 45 1.2492
hn-nh-os 62.7 106.07 SOURCE3_SOURCE5 11 1.1257
hn-nh-p2 55.5 118.18 SOURCE3 21 3.6927
hn-nh-p3 54.2 116.19 SOURCE3 3 3.0539
hn-nh-p4 55.9 112.60 SOURCE3 3 0.8237
hn-nh-p5 56.5 115.09 SOURCE3_SOURCE5 12 1.4234
hn-nh-s4 43.3 107.48 SOURCE3 3 1.3960
hn-nh-s 41.1 114.37 SOURCE3 1
hn-nh-s6 44.3 109.92 SOURCE4_SOURCE5 70 0.7219
hn-nh-sh 43.5 112.25 SOURCE3 1
hn-nh-ss 43.2 114.10 SOURCE3_SOURCE5 9 0.8638
hn-nh-sy 43.6 110.91 SOURCE4_SOURCE5 174 1.2855
i -nh-i 65.2 115.82 SOURCE3 1
n1-nh-n1 115.5 106.71 HF/6-31G* 1
n2-nh-n2 109.1 117.50 SOURCE3 2 1.1907
n2-nh-n3 105.8 119.06 SOURCE3_SOURCE5 5 1.1057
n2-nh-o 108.7 126.06 SOURCE3 1
n3-nh-n3 107.3 110.98 SOURCE3 1
n4-nh-n4 104.8 108.36 SOURCE3 1
na-nh-na 107.9 112.01 SOURCE3 1
hn-n -hn 39.0 117.95 SOURCE3_SOURCE5 619 1.1004
nh-nh-nh 107.7 112.23 SOURCE3 1
hn-n -i 37.5 117.24 SOURCE3 2 0.4435
hn-n -n2 61.3 119.08 SOURCE3_SOURCE5 133 1.1985
hn-n -n3 60.1 117.24 SOURCE4_SOURCE5 85 1.3614
hn-n -n4 60.2 112.68 SOURCE3 3 1.9746
hn-n -n 61.1 113.20 SOURCE3_SOURCE5 44 1.5099
hn-n -na 60.7 114.35 SOURCE3_SOURCE5 14 1.6595
hn-n -nc 62.3 115.42 SOURCE4_SOURCE5 34 0.6814
hn-n -nh 61.2 113.21 SOURCE4_SOURCE5 34 1.4195
hn-n -no 60.0 110.11 SOURCE3 1
hn-n -o 66.7 116.32 SOURCE3 2 0.0175
n -nh-o 111.0 115.63 SOURCE3 1
hn-n -oh 61.7 110.74 SOURCE4_SOURCE5 106 1.1526
no-nh-no 110.7 108.55 SOURCE3 1
hn-n -os 61.8 110.01 SOURCE4_SOURCE5 28 0.8603
hn-n -p2 53.6 118.05 SOURCE3 7 3.0564
hn-n -p3 52.0 119.63 SOURCE3 2
hn-n -p4 54.1 115.71 SOURCE3 1
hn-n -p5 55.2 113.61 SOURCE4_SOURCE5 12 0.8598
hn-n -s4 41.9 112.46 SOURCE3 1
hn-n -s 41.4 114.92 SOURCE3 2 0.0260
hn-n -s6 43.2 112.56 SOURCE4_SOURCE5 18 0.6934
hn-n -sh 42.5 114.91 SOURCE3 1
hn-n -ss 42.4 115.60 SOURCE3 3 0.6414
hn-n -sy 43.3 112.33 SOURCE4_SOURCE5 87 0.6324
oh-nh-oh 109.6 106.27 SOURCE3 1
o -nh-o 111.9 128.06 SOURCE3 1
os-nh-os 110.1 105.27 SOURCE3 1
p2-nh-p2 103.6 127.33 SOURCE3 2 2.7857
p3-nh-p3 101.5 125.08 SOURCE3 1
p5-nh-p5 110.6 112.76 SOURCE3 1
s4-nh-s4 64.3 112.39 SOURCE3 1
s6-nh-s6 64.0 120.27 SOURCE3 1
sh-nh-sh 64.0 119.00 SOURCE3 1
s -nh-s 61.3 118.73 SOURCE3 1
ss-nh-ss 63.9 119.25 SOURCE3 1
i -n -i 66.1 118.20 SOURCE3 1
n2-n -n2 108.7 116.89 SOURCE3 1
n3-n -n3 104.9 117.94 SOURCE3 1
n4-n -n4 105.2 112.69 SOURCE3 1
na-n -na 104.9 117.38 SOURCE3 1
nc-n -nc 109.0 116.41 CORR 2
nc-n -p2 102.8 117.21 CORR 2
nc-n -pc 102.5 117.21 CORR 2
nd-n -nd 109.0 116.41 CORR 2
nd-n -p2 102.8 117.21 CORR 2
nd-n -pd 102.5 117.21 CORR 2
nh-n -nh 106.3 115.18 SOURCE3 1
n -n -n 106.4 114.62 SOURCE3 1
no-n -no 105.4 108.66 SOURCE3 1
br-no-o 72.5 113.19 SOURCE3 2
c1-no-o 89.1 116.63 SOURCE3 6
c2-no-o 86.9 117.67 SOURCE3_SOURCE5 49 0.7530
c3-no-o 83.5 116.93 SOURCE3_SOURCE5 182 0.7108
ca-no-o 85.9 117.76 SOURCE3_SOURCE5 886 0.2929
cc-no-o 87.7 117.49 SOURCE4_SOURCE5 624 0.5662
cl-no-o 86.5 115.08 SOURCE3 2
c -no-o 83.8 115.26 SOURCE3 1
hn-no-o 67.4 115.49 SOURCE3 2
oh-n -oh 109.8 107.26 SOURCE3 1
i -no-o 70.4 116.31 SOURCE3 2
n1-no-o 112.6 115.00 HF/6-31G* 1
n2-no-o 110.0 116.52 SOURCE2_SOURCE5 17 2.4833
n3-no-o 111.9 116.77 SOURCE3_SOURCE5 35 0.4158
n4-no-o 111.2 109.00 SOURCE3 2
na-no-o 110.5 115.57 SOURCE3_SOURCE5 29 0.5293
nh-no-o 112.8 116.08 SOURCE3_SOURCE5 32 0.8573
n -no-o 109.3 115.59 SOURCE3_SOURCE5 14 0.7108
no-no-o 91.6 112.38 SOURCE3 4
o -n -o 113.5 128.61 SOURCE3 3 1.0626
o -no-o 116.6 125.08 SOURCE4_SOURCE5 1464 0.8585
o -no-oh 112.4 114.70 SOURCE3 2
o -no-os 111.6 114.76 SOURCE3_SOURCE5 147 2.2227
o -no-p2 104.0 117.38 SOURCE3 20 0.8083
o -no-p3 98.5 116.78 SOURCE3 6 0.4929
o -no-p4 97.2 116.64 SOURCE3 6 0.0089
o -no-p5 99.1 116.69 SOURCE3 8 0.4507
o -no-s4 71.5 114.49 SOURCE3 6 0.5674
o -no-s6 72.3 114.39 SOURCE3 6 0.8311
o -no-s 80.0 119.81 SOURCE3 4 0.0042
o -no-sh 78.6 116.10 SOURCE3 2
o -no-ss 77.8 115.58 SOURCE3 6 0.5860
os-n -os
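The entries above follow a GAFF-style angle parameter layout: a fixed-width, hyphen-separated
atom-type triple, the angle force constant (kcal/mol/rad^2), the equilibrium angle (degrees),
and optional provenance fields (source tag, observation count, standard deviation). A minimal
parsing sketch under that assumption; the helper name and example line are illustrative:

def parse_angle_line(line):
    # Assumed layout: the first 8 characters hold the atom-type triple (e.g. "c2-nh-c3"
    # or "o -ne-sx"); the remainder is whitespace-separated numeric/provenance fields.
    types = tuple(t.strip() for t in line[:8].split('-'))
    rest = line[8:].split()
    return {
        'types': types,                                       # e.g. ('c2', 'nh', 'c3')
        'force_const': float(rest[0]),                        # kcal/mol/rad^2
        'eq_angle': float(rest[1]),                           # degrees
        'source': rest[2] if len(rest) > 2 else None,         # e.g. SOURCE3, CORR_SOURCE5
        'count': int(rest[3]) if len(rest) > 3 else None,     # number of observations
        'stddev': float(rest[4]) if len(rest) > 4 else None,  # std. deviation, when given
    }

print(parse_angle_line("c2-nh-c3    64.2      123.71   SOURCE3             8    3.5348"))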